/* opal-irqchip.c */
  1. /*
  2. * This file implements an irqchip for OPAL events. Whenever there is
  3. * an interrupt that is handled by OPAL we get passed a list of events
  4. * that Linux needs to do something about. These basically look like
  5. * interrupts to Linux so we implement an irqchip to handle them.
  6. *
  7. * Copyright Alistair Popple, IBM Corporation 2014.
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the
  11. * Free Software Foundation; either version 2 of the License, or (at your
  12. * option) any later version.
  13. */
  14. #include <linux/bitops.h>
  15. #include <linux/irq.h>
  16. #include <linux/irqchip.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/module.h>
  20. #include <linux/of.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/kthread.h>
  23. #include <linux/delay.h>
  24. #include <linux/slab.h>
  25. #include <linux/irq_work.h>
  26. #include <asm/machdep.h>
  27. #include <asm/opal.h>
  28. #include "powernv.h"
/* Maximum number of events supported by OPAL firmware */
#define MAX_NUM_EVENTS 64

/*
 * Driver state: the irq_chip callbacks, the linear irq domain mapping
 * OPAL event numbers to Linux virqs, and a bitmask of the events that
 * are currently unmasked (i.e. have a registered consumer).
 */
struct opal_event_irqchip {
	struct irq_chip irqchip;
	struct irq_domain *domain;
	unsigned long mask;
};
static struct opal_event_irqchip opal_event_irqchip;

/* Firmware interrupts requested at init, cached so shutdown can free them */
static unsigned int opal_irq_count;
static unsigned int *opal_irqs;

static void opal_handle_irq_work(struct irq_work *work);
/* Most recently reported event bits, replayed from irq_work context */
static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
	.func = opal_handle_irq_work,
};
  44. void opal_handle_events(uint64_t events)
  45. {
  46. int virq, hwirq = 0;
  47. u64 mask = opal_event_irqchip.mask;
  48. if (!in_irq() && (events & mask)) {
  49. last_outstanding_events = events;
  50. irq_work_queue(&opal_event_irq_work);
  51. return;
  52. }
  53. while (events & mask) {
  54. hwirq = fls64(events) - 1;
  55. if (BIT_ULL(hwirq) & mask) {
  56. virq = irq_find_mapping(opal_event_irqchip.domain,
  57. hwirq);
  58. if (virq)
  59. generic_handle_irq(virq);
  60. }
  61. events &= ~BIT_ULL(hwirq);
  62. }
  63. }
/* irq_chip .irq_mask: stop delivering this event by clearing its mask bit */
static void opal_event_mask(struct irq_data *d)
{
	clear_bit(d->hwirq, &opal_event_irqchip.mask);
}
/*
 * irq_chip .irq_unmask: enable delivery of this event, then poll OPAL so
 * an event that was raised while the source was masked is not lost.
 */
static void opal_event_unmask(struct irq_data *d)
{
	__be64 events;

	/* Must be set before polling so the retrigger check below sees it */
	set_bit(d->hwirq, &opal_event_irqchip.mask);

	opal_poll_events(&events);
	last_outstanding_events = be64_to_cpu(events);

	/*
	 * We can't just handle the events now with opal_handle_events().
	 * If we did we would deadlock when opal_event_unmask() is called from
	 * handle_level_irq() with the irq descriptor lock held, because
	 * calling opal_handle_events() would call generic_handle_irq() and
	 * then handle_level_irq() which would try to take the descriptor lock
	 * again. Instead queue the events for later.
	 */
	if (last_outstanding_events & opal_event_irqchip.mask)
		/* Need to retrigger the interrupt */
		irq_work_queue(&opal_event_irq_work);
}
  86. static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
  87. {
  88. /*
  89. * For now we only support level triggered events. The irq
  90. * handler will be called continuously until the event has
  91. * been cleared in OPAL.
  92. */
  93. if (flow_type != IRQ_TYPE_LEVEL_HIGH)
  94. return -EINVAL;
  95. return 0;
  96. }
/* Singleton irqchip instance; all events start masked (mask == 0) */
static struct opal_event_irqchip opal_event_irqchip = {
	.irqchip = {
		.name = "OPAL EVT",
		.irq_mask = opal_event_mask,
		.irq_unmask = opal_event_unmask,
		.irq_set_type = opal_event_set_type,
	},
	.mask = 0,
};
/*
 * irq_domain .map: wire a newly created virq to this irqchip with level
 * flow handling. chip_data is set before the handler so the chip
 * callbacks never see a half-initialised descriptor.
 */
static int opal_event_map(struct irq_domain *d, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, &opal_event_irqchip);
	irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip,
				 handle_level_irq);

	return 0;
}
/*
 * Handler for the firmware interrupts listed in "opal-interrupts": let
 * OPAL process the interrupt and dispatch whatever events it reports.
 *
 * NOTE(review): events is read unconditionally — this assumes
 * opal_handle_interrupt() always writes it; confirm against the OPAL
 * call's contract.
 */
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);
	opal_handle_events(be64_to_cpu(events));

	return IRQ_HANDLED;
}
/* irq_work callback: replay events that were deferred from non-irq context */
static void opal_handle_irq_work(struct irq_work *work)
{
	opal_handle_events(last_outstanding_events);
}
/* irq_domain .match: this domain serves exactly its own device tree node */
static int opal_event_match(struct irq_domain *h, struct device_node *node,
			    enum irq_domain_bus_token bus_token)
{
	return irq_domain_get_of_node(h) == node;
}
  130. static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
  131. const u32 *intspec, unsigned int intsize,
  132. irq_hw_number_t *out_hwirq, unsigned int *out_flags)
  133. {
  134. *out_hwirq = intspec[0];
  135. *out_flags = IRQ_TYPE_LEVEL_HIGH;
  136. return 0;
  137. }
/* Domain callbacks: DT node matching, virq setup, one-cell specifier xlate */
static const struct irq_domain_ops opal_event_domain_ops = {
	.match = opal_event_match,
	.map = opal_event_map,
	.xlate = opal_event_xlate,
};
  143. void opal_event_shutdown(void)
  144. {
  145. unsigned int i;
  146. /* First free interrupts, which will also mask them */
  147. for (i = 0; i < opal_irq_count; i++) {
  148. if (opal_irqs[i])
  149. free_irq(opal_irqs[i], NULL);
  150. opal_irqs[i] = 0;
  151. }
  152. }
  153. int __init opal_event_init(void)
  154. {
  155. struct device_node *dn, *opal_node;
  156. const __be32 *irqs;
  157. int i, irqlen, rc = 0;
  158. opal_node = of_find_node_by_path("/ibm,opal");
  159. if (!opal_node) {
  160. pr_warn("opal: Node not found\n");
  161. return -ENODEV;
  162. }
  163. /* If dn is NULL it means the domain won't be linked to a DT
  164. * node so therefore irq_of_parse_and_map(...) wont work. But
  165. * that shouldn't be problem because if we're running a
  166. * version of skiboot that doesn't have the dn then the
  167. * devices won't have the correct properties and will have to
  168. * fall back to the legacy method (opal_event_request(...))
  169. * anyway. */
  170. dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event");
  171. opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS,
  172. &opal_event_domain_ops, &opal_event_irqchip);
  173. of_node_put(dn);
  174. if (!opal_event_irqchip.domain) {
  175. pr_warn("opal: Unable to create irq domain\n");
  176. rc = -ENOMEM;
  177. goto out;
  178. }
  179. /* Get interrupt property */
  180. irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
  181. opal_irq_count = irqs ? (irqlen / 4) : 0;
  182. pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
  183. /* Install interrupt handlers */
  184. opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL);
  185. for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
  186. unsigned int irq, virq;
  187. /* Get hardware and virtual IRQ */
  188. irq = be32_to_cpup(irqs);
  189. virq = irq_create_mapping(NULL, irq);
  190. if (!virq) {
  191. pr_warn("Failed to map irq 0x%x\n", irq);
  192. continue;
  193. }
  194. /* Install interrupt handler */
  195. rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
  196. "opal", NULL);
  197. if (rc) {
  198. irq_dispose_mapping(virq);
  199. pr_warn("Error %d requesting irq %d (0x%x)\n",
  200. rc, virq, irq);
  201. continue;
  202. }
  203. /* Cache IRQ */
  204. opal_irqs[i] = virq;
  205. }
  206. out:
  207. of_node_put(opal_node);
  208. return rc;
  209. }
/* Run at arch_initcall time on powernv, before OPAL drivers probe */
machine_arch_initcall(powernv, opal_event_init);
/**
 * opal_event_request(unsigned int opal_event_nr) - Request an event
 * @opal_event_nr: the opal event number to request
 *
 * This routine can be used to find the linux virq number which can
 * then be passed to request_irq to assign a handler for a particular
 * opal event. This should only be used by legacy devices which don't
 * have proper device tree bindings. Most devices should use
 * irq_of_parse_and_map() instead.
 *
 * Returns the virq, or 0 if the domain has not been created yet
 * (caller ordering bug — hence the WARN).
 */
int opal_event_request(unsigned int opal_event_nr)
{
	if (WARN_ON_ONCE(!opal_event_irqchip.domain))
		return 0;

	return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
}
EXPORT_SYMBOL(opal_event_request);