i8259.c

/*
 * i8259 interrupt controller driver.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/prom.h>

static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
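
/*
 * Cached copies of the two OCW1 mask registers: cached_A1 shadows the
 * slave PIC's mask port (0xA1), cached_21 the master's (0x21).  All
 * lines start out masked.
 */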
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

static DEFINE_RAW_SPINLOCK(i8259_lock);

static struct irq_domain *i8259_host;

/*
 * Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or poll.  How i8259_init() is called determines
 * which method is used.  Note that polling is broken on some IBM and
 * Motorola PReP boxes, so we must use the int-ack feature on them.
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		raw_spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1. */
		outb(0x0C, 0x20);	/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR).  If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		if (!pci_intack)
			outb(0x0B, 0x20);	/* ISR register */
		if (~inb(0x20) & 0x80)
			irq = NO_IRQ;
	} else if (irq == 0xff)
		irq = NO_IRQ;

	if (lock)
		raw_spin_unlock(&i8259_lock);
	return irq;
}
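
/*
 * Mask the interrupt in the cached mask register, then issue a
 * non-specific EOI (OCW2 0x20).  Interrupts on the slave also need an
 * EOI sent to the master for the cascade line.
 */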
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq > 7) {
		cached_A1 |= 1 << (d->irq-8);
		inb(0xA1); 	/* DUMMY */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << d->irq;
		inb(0x21); 	/* DUMMY */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
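
/*
 * Write both cached mask bytes back out to the PICs (OCW1).  The irq_nr
 * argument is currently unused; both masks are always rewritten.
 */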
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);
}

static void i8259_mask_irq(struct irq_data *d)
{
	unsigned long flags;

	pr_debug("i8259_mask_irq(%d)\n", d->irq);

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq < 8)
		cached_21 |= 1 << d->irq;
	else
		cached_A1 |= 1 << (d->irq-8);
	i8259_set_irq_mask(d->irq);
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_unmask_irq(struct irq_data *d)
{
	unsigned long flags;

	pr_debug("i8259_unmask_irq(%d)\n", d->irq);

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq < 8)
		cached_21 &= ~(1 << d->irq);
	else
		cached_A1 &= ~(1 << (d->irq-8));
	i8259_set_irq_mask(d->irq);
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}

static struct irq_chip i8259_pic = {
	.name = "i8259",
	.irq_mask = i8259_mask_irq,
	.irq_disable = i8259_mask_irq,
	.irq_unmask = i8259_unmask_irq,
	.irq_mask_ack = i8259_mask_and_ack_irq,
};

static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_BUSY,
};

static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_BUSY,
};

static int i8259_host_match(struct irq_domain *h, struct device_node *node)
{
	return h->of_node == NULL || h->of_node == node;
}

static int i8259_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* We use the level handler only for now; we might want to be more
	 * careful here, but this works for now
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
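
/*
 * Translate a device-tree interrupt specifier into a hardware IRQ number
 * and a Linux trigger type.  intspec[0] is the 8259 line; the optional
 * intspec[1] encodes the ISA sense (level/edge, low/high).
 */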
static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
			    const u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	static unsigned char map_isa_senses[4] = {
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
	};

	*out_hwirq = intspec[0];
	if (intsize > 1 && intspec[1] < 4)
		*out_flags = map_isa_senses[intspec[1]];
	else
		*out_flags = IRQ_TYPE_NONE;

	return 0;
}

static struct irq_domain_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.xlate = i8259_host_xlate,
};

struct irq_domain *i8259_get_host(void)
{
	return i8259_host;
}

/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	raw_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence: edge triggered, cascade, ICW4 */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence: edge triggered, cascade, ICW4 */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* Slave ID: cascaded on master's IRQ2 */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	raw_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host */
	i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host!\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that? It seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
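
/*
 * Usage sketch (not part of the original file; the names below are made
 * up for illustration): a platform would typically call i8259_init() once
 * from its setup code and then dispatch 8259 interrupts from the cascade
 * handler of its upstream interrupt controller, roughly like this:
 *
 *	static void example_8259_cascade(unsigned int irq,
 *					 struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned int cascade_irq = i8259_irq();
 *
 *		if (cascade_irq != NO_IRQ)
 *			generic_handle_irq(cascade_irq);
 *		chip->irq_eoi(&desc->irq_data);
 *	}
 */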