ehv_pic.c 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302
  1. /*
  2. * Driver for ePAPR Embedded Hypervisor PIC
  3. *
  4. * Copyright 2008-2011 Freescale Semiconductor, Inc.
  5. *
  6. * Author: Ashish Kalra <ashish.kalra@freescale.com>
  7. *
  8. * This file is licensed under the terms of the GNU General Public License
  9. * version 2. This program is licensed "as is" without any warranty of any
  10. * kind, whether express or implied.
  11. */
  12. #include <linux/types.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/irq.h>
  16. #include <linux/smp.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/slab.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/of.h>
  21. #include <asm/io.h>
  22. #include <asm/irq.h>
  23. #include <asm/smp.h>
  24. #include <asm/machdep.h>
  25. #include <asm/ehv_pic.h>
  26. #include <asm/fsl_hcalls.h>
  27. #include "../../../kernel/irq/settings.h"
/* The single system-wide ehv_pic instance, set up by ehv_pic_init() */
static struct ehv_pic *global_ehv_pic;
/* Serializes the ev_int_get_config()/ev_int_set_config() read-modify-write pairs */
static DEFINE_SPINLOCK(ehv_pic_lock);

/*
 * Raw device-tree sense word per hw irq, recorded by ehv_pic_host_xlate()
 * and later consulted by ehv_pic_host_map() to pick the irq_chip.
 */
static u32 hwirq_intspec[NR_EHV_PIC_INTS];
/* Mapped per-cpu MPIC registers for direct EOI; NULL when not present in the DT */
static u32 __iomem *mpic_percpu_base_vaddr;

/* Flag bit in the DT sense word selecting direct (non-hypercall) MPIC EOI */
#define IRQ_TYPE_MPIC_DIRECT 4
/* Byte offset of the per-cpu MPIC EOI register */
#define MPIC_EOI 0x00B0
  34. /*
  35. * Linux descriptor level callbacks
  36. */
  37. void ehv_pic_unmask_irq(struct irq_data *d)
  38. {
  39. unsigned int src = virq_to_hw(d->irq);
  40. ev_int_set_mask(src, 0);
  41. }
  42. void ehv_pic_mask_irq(struct irq_data *d)
  43. {
  44. unsigned int src = virq_to_hw(d->irq);
  45. ev_int_set_mask(src, 1);
  46. }
  47. void ehv_pic_end_irq(struct irq_data *d)
  48. {
  49. unsigned int src = virq_to_hw(d->irq);
  50. ev_int_eoi(src);
  51. }
  52. void ehv_pic_direct_end_irq(struct irq_data *d)
  53. {
  54. out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
  55. }
  56. int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
  57. bool force)
  58. {
  59. unsigned int src = virq_to_hw(d->irq);
  60. unsigned int config, prio, cpu_dest;
  61. int cpuid = irq_choose_cpu(dest);
  62. unsigned long flags;
  63. spin_lock_irqsave(&ehv_pic_lock, flags);
  64. ev_int_get_config(src, &config, &prio, &cpu_dest);
  65. ev_int_set_config(src, config, prio, cpuid);
  66. spin_unlock_irqrestore(&ehv_pic_lock, flags);
  67. return 0;
  68. }
  69. static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
  70. {
  71. /* Now convert sense value */
  72. switch (type & IRQ_TYPE_SENSE_MASK) {
  73. case IRQ_TYPE_EDGE_RISING:
  74. return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
  75. EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
  76. case IRQ_TYPE_EDGE_FALLING:
  77. case IRQ_TYPE_EDGE_BOTH:
  78. return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
  79. EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
  80. case IRQ_TYPE_LEVEL_HIGH:
  81. return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
  82. EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
  83. case IRQ_TYPE_LEVEL_LOW:
  84. default:
  85. return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
  86. EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
  87. }
  88. }
  89. int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
  90. {
  91. unsigned int src = virq_to_hw(d->irq);
  92. struct irq_desc *desc = irq_to_desc(d->irq);
  93. unsigned int vecpri, vold, vnew, prio, cpu_dest;
  94. unsigned long flags;
  95. if (flow_type == IRQ_TYPE_NONE)
  96. flow_type = IRQ_TYPE_LEVEL_LOW;
  97. irq_settings_clr_level(desc);
  98. irq_settings_set_trigger_mask(desc, flow_type);
  99. if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
  100. irq_settings_set_level(desc);
  101. vecpri = ehv_pic_type_to_vecpri(flow_type);
  102. spin_lock_irqsave(&ehv_pic_lock, flags);
  103. ev_int_get_config(src, &vold, &prio, &cpu_dest);
  104. vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
  105. EHV_PIC_INFO(VECPRI_SENSE_MASK));
  106. vnew |= vecpri;
  107. /*
  108. * TODO : Add specific interface call for platform to set
  109. * individual interrupt priorities.
  110. * platform currently using static/default priority for all ints
  111. */
  112. prio = 8;
  113. ev_int_set_config(src, vecpri, prio, cpu_dest);
  114. spin_unlock_irqrestore(&ehv_pic_lock, flags);
  115. return 0;
  116. }
/* Default chip: mask/unmask/eoi all go through ePAPR hypercalls */
static struct irq_chip ehv_pic_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
/*
 * Variant for sources flagged IRQ_TYPE_MPIC_DIRECT: identical to
 * ehv_pic_irq_chip except eoi is written straight to the per-cpu
 * MPIC registers instead of going through a hypercall.
 */
static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_direct_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
  129. /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
  130. unsigned int ehv_pic_get_irq(void)
  131. {
  132. int irq;
  133. BUG_ON(global_ehv_pic == NULL);
  134. if (global_ehv_pic->coreint_flag)
  135. irq = mfspr(SPRN_EPR); /* if core int mode */
  136. else
  137. ev_int_iack(0, &irq); /* legacy mode */
  138. if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
  139. return NO_IRQ;
  140. /*
  141. * this will also setup revmap[] in the slow path for the first
  142. * time, next calls will always use fast path by indexing revmap
  143. */
  144. return irq_linear_revmap(global_ehv_pic->irqhost, irq);
  145. }
  146. static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
  147. {
  148. /* Exact match, unless ehv_pic node is NULL */
  149. return h->of_node == NULL || h->of_node == node;
  150. }
  151. static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
  152. irq_hw_number_t hw)
  153. {
  154. struct ehv_pic *ehv_pic = h->host_data;
  155. struct irq_chip *chip;
  156. /* Default chip */
  157. chip = &ehv_pic->hc_irq;
  158. if (mpic_percpu_base_vaddr)
  159. if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
  160. chip = &ehv_pic_direct_eoi_irq_chip;
  161. irq_set_chip_data(virq, chip);
  162. /*
  163. * using handle_fasteoi_irq as our irq handler, this will
  164. * only call the eoi callback and suitable for the MPIC
  165. * controller which set ISR/IPR automatically and clear the
  166. * highest priority active interrupt in ISR/IPR when we do
  167. * a specific eoi
  168. */
  169. irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
  170. /* Set default irq type */
  171. irq_set_irq_type(virq, IRQ_TYPE_NONE);
  172. return 0;
  173. }
  174. static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
  175. const u32 *intspec, unsigned int intsize,
  176. irq_hw_number_t *out_hwirq, unsigned int *out_flags)
  177. {
  178. /*
  179. * interrupt sense values coming from the guest device tree
  180. * interrupt specifiers can have four possible sense and
  181. * level encoding information and they need to
  182. * be translated between firmware type & linux type.
  183. */
  184. static unsigned char map_of_senses_to_linux_irqtype[4] = {
  185. IRQ_TYPE_EDGE_FALLING,
  186. IRQ_TYPE_EDGE_RISING,
  187. IRQ_TYPE_LEVEL_LOW,
  188. IRQ_TYPE_LEVEL_HIGH,
  189. };
  190. *out_hwirq = intspec[0];
  191. if (intsize > 1) {
  192. hwirq_intspec[intspec[0]] = intspec[1];
  193. *out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
  194. ~IRQ_TYPE_MPIC_DIRECT];
  195. } else {
  196. *out_flags = IRQ_TYPE_NONE;
  197. }
  198. return 0;
  199. }
/* irq_domain callbacks for the ehv_pic linear domain */
static const struct irq_domain_ops ehv_pic_host_ops = {
	.match = ehv_pic_host_match,
	.map = ehv_pic_host_map,
	.xlate = ehv_pic_host_xlate,
};
  205. void __init ehv_pic_init(void)
  206. {
  207. struct device_node *np, *np2;
  208. struct ehv_pic *ehv_pic;
  209. int coreint_flag = 1;
  210. np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
  211. if (!np) {
  212. pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
  213. return;
  214. }
  215. if (!of_find_property(np, "has-external-proxy", NULL))
  216. coreint_flag = 0;
  217. ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
  218. if (!ehv_pic) {
  219. of_node_put(np);
  220. return;
  221. }
  222. ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
  223. &ehv_pic_host_ops, ehv_pic);
  224. if (!ehv_pic->irqhost) {
  225. of_node_put(np);
  226. kfree(ehv_pic);
  227. return;
  228. }
  229. np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
  230. if (np2) {
  231. mpic_percpu_base_vaddr = of_iomap(np2, 0);
  232. if (!mpic_percpu_base_vaddr)
  233. pr_err("ehv_pic_init: of_iomap failed\n");
  234. of_node_put(np2);
  235. }
  236. ehv_pic->hc_irq = ehv_pic_irq_chip;
  237. ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
  238. ehv_pic->coreint_flag = coreint_flag;
  239. global_ehv_pic = ehv_pic;
  240. irq_set_default_host(global_ehv_pic->irqhost);
  241. }