iosapic.c
/*
 * I/O SAPIC support.
 *
 * Copyright (C) 1999 Intel Corp.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *
 * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
 *				APIC code.  In particular, we now have separate
 *				handlers for edge and level triggered
 *				interrupts.
 * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
 *				allocation PCI to vector mapping, shared PCI
 *				interrupts.
 * 00/10/27	D. Mosberger	Document things a bit more to make them more
 *				understandable.  Clean up much of the old
 *				IOSAPIC cruft.
 * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
 *				and fixes for ACPI S5 (SoftOff) support.
 * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
 * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
 *				vectors in iosapic_set_affinity(),
 *				initializations for /proc/irq/#/smp_affinity
 * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
 * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
 * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
 *				IOSAPIC mapping error
 * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
 * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
 *				interrupt, vector, etc.)
 * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
 *				pci_irq code.
 * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
 *				Remove iosapic_address & gsi_base from
 *				external interfaces.  Rationalize
 *				__init/__devinit attributes.
 * 04/12/04	Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
 *				Updated to work with irq migration necessary
 *				for CPU Hotplug
 */
/*
 * Here is what the interrupt logic between a PCI device and the kernel looks
 * like:
 *
 * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
 *     INTD).  The device is uniquely identified by its bus- and slot-number
 *     (the function number does not matter here because all functions share
 *     the same interrupt lines).
 *
 * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
 *     controller.  Multiple interrupt lines may have to share the same
 *     IOSAPIC pin (if they're level triggered and use the same polarity).
 *     Each interrupt line has a unique Global System Interrupt (GSI) number
 *     which can be calculated as the sum of the controller's base GSI number
 *     and the IOSAPIC pin number to which the line connects.
 *
 * (3) The IOSAPIC uses internal routing table entries (RTEs) to map an
 *     IOSAPIC pin to an IA-64 interrupt vector.  This interrupt vector is
 *     then sent to the CPU.
 *
 * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
 *     used as the architecture-independent interrupt handling mechanism in
 *     Linux.  As an IRQ is a number, we have to have an
 *     IA-64 interrupt vector number <-> IRQ number mapping.  On smaller
 *     systems, we use a one-to-one mapping between IA-64 vector and IRQ.  A
 *     platform can implement platform_irq_to_vector(irq) and
 *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
 *     Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
 *
 * To sum up, there are three levels of mappings involved:
 *
 *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
 * describe interrupts.  Now we use "IRQ" only for Linux IRQs.  ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
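/*
 * A worked example of the three-level mapping above, using made-up numbers
 * (the actual values depend on the platform's ACPI tables):
 *
 *	- a device behind an IOSAPIC whose gsi_base is 16 raises pin 3,
 *	  so its GSI is 16 + 3 = 19;
 *	- the RTE for pin 3 is programmed with a free IA-64 vector, say
 *	  0x53, which the IOSAPIC delivers to the destination CPU;
 *	- on small systems that vector maps one-to-one to a Linux IRQ, so a
 *	  driver would end up calling request_irq() on the IRQ returned by
 *	  gsi_to_irq(19) (defined below).
 */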
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

#undef DEBUG_INTERRUPT_ROUTING

#ifdef DEBUG_INTERRUPT_ROUTING
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

static DEFINE_SPINLOCK(iosapic_lock);
/*
 * These tables map IA-64 vectors to the IOSAPIC pin that generates this
 * vector.
 */

#define NO_REF_RTE	0

static struct iosapic {
	char __iomem *addr;		/* base address of IOSAPIC */
	unsigned int gsi_base;		/* GSI base */
	unsigned short num_rte;		/* # of RTEs on this IOSAPIC */
	int rtes_inuse;			/* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
	unsigned short node;		/* numa node association via pxm */
#endif
	spinlock_t lock;		/* lock for indirect reg access */
} iosapic_lists[NR_IOSAPICS];

struct iosapic_rte_info {
	struct list_head rte_list;	/* RTEs sharing the same vector */
	char rte_index;			/* IOSAPIC RTE index */
	int refcnt;			/* reference counter */
	struct iosapic *iosapic;
} ____cacheline_aligned;

static struct iosapic_intr_info {
	struct list_head rtes;		/* RTEs using this vector (empty =>
					 * not an IOSAPIC interrupt) */
	int count;			/* # of registered RTEs */
	u32 low32;			/* current value of low word of
					 * Redirection table entry */
	unsigned int dest;		/* destination CPU physical ID */
	unsigned char dmode	: 3;	/* delivery mode (see iosapic.h) */
	unsigned char polarity	: 1;	/* interrupt polarity
					 * (see iosapic.h) */
	unsigned char trigger	: 1;	/* trigger mode (see iosapic.h) */
} iosapic_intr_info[NR_IRQS];
static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */

static inline void
iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&iosapic->lock, flags);
	__iosapic_write(iosapic->addr, reg, val);
	spin_unlock_irqrestore(&iosapic->lock, flags);
}
/*
 * Find an IOSAPIC associated with a GSI
 */
static inline int
find_iosapic (unsigned int gsi)
{
	int i;

	for (i = 0; i < NR_IOSAPICS; i++) {
		/*
		 * The unsigned subtraction wraps around for gsi < gsi_base,
		 * so a single compare covers both ends of the GSI range.
		 */
		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
		    iosapic_lists[i].num_rte)
			return i;
	}

	return -1;
}
static inline int __gsi_to_irq(unsigned int gsi)
{
	int irq;
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		list_for_each_entry(rte, &info->rtes, rte_list)
			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
				return irq;
	}
	return -1;
}

int
gsi_to_irq (unsigned int gsi)
{
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
{
	struct iosapic_rte_info *rte;

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
			return rte;
	return NULL;
}
static void
set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{
	unsigned long pol, trigger, dmode;
	u32 low32, high32;
	int rte_index;
	char redir;
	struct iosapic_rte_info *rte;
	ia64_vector vector = irq_to_vector(irq);

	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);

	rte = find_rte(irq, gsi);
	if (!rte)
		return;		/* not an IOSAPIC interrupt */

	rte_index = rte->rte_index;
	pol     = iosapic_intr_info[irq].polarity;
	trigger = iosapic_intr_info[irq].trigger;
	dmode   = iosapic_intr_info[irq].dmode;

	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;

#ifdef CONFIG_SMP
	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
#endif

	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
		 vector);

	/* dest contains both id and eid */
	high32 = (dest << IOSAPIC_DEST_SHIFT);

	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
}
static void
nop (struct irq_data *data)
{
	/* do nothing... */
}

#ifdef CONFIG_KEXEC
void
kexec_disable_iosapic(void)
{
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;
	ia64_vector vec;
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		vec = irq_to_vector(irq);
		list_for_each_entry(rte, &info->rtes, rte_list) {
			iosapic_write(rte->iosapic,
				      IOSAPIC_RTE_LOW(rte->rte_index),
				      IOSAPIC_MASK|vec);
			iosapic_eoi(rte->iosapic->addr, vec);
		}
	}
}
#endif
static void
mask_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	/* set only the mask bit */
	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
unmask_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}
static int
iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		     bool force)
{
#ifdef CONFIG_SMP
	unsigned int irq = data->irq;
	u32 high32, low32;
	int cpu, dest, rte_index;
	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
	struct iosapic_rte_info *rte;
	struct iosapic *iosapic;

	irq &= (~IA64_IRQ_REDIRECTED);

	cpu = cpumask_first_and(cpu_online_mask, mask);
	if (cpu >= nr_cpu_ids)
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dest = cpu_physical_id(cpu);

	if (!iosapic_intr_info[irq].count)
		return -1;		/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

	/* dest contains both id and eid */
	high32 = dest << IOSAPIC_DEST_SHIFT;

	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
	if (redir)
		/* change delivery mode to lowest priority */
		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
	else
		/* change delivery mode to fixed */
		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
	low32 &= IOSAPIC_VECTOR_MASK;
	low32 |= irq_to_vector(irq);

	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		iosapic = rte->iosapic;
		rte_index = rte->rte_index;
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
#endif
	return 0;
}
/*
 * Handlers for level-triggered interrupts.
 */

static unsigned int
iosapic_startup_level_irq (struct irq_data *data)
{
	unmask_irq(data);
	return 0;
}

static void
iosapic_unmask_level_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;
	int do_unmask_irq = 0;

	irq_complete_move(irq);
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		do_unmask_irq = 1;
		mask_irq(data);
	} else
		unmask_irq(data);

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		iosapic_eoi(rte->iosapic->addr, vec);

	if (unlikely(do_unmask_irq)) {
		irq_move_masked_irq(data);
		unmask_irq(data);
	}
}

#define iosapic_shutdown_level_irq	mask_irq
#define iosapic_enable_level_irq	unmask_irq
#define iosapic_disable_level_irq	mask_irq
#define iosapic_ack_level_irq		nop

static struct irq_chip irq_type_iosapic_level = {
	.name			= "IO-SAPIC-level",
	.irq_startup		= iosapic_startup_level_irq,
	.irq_shutdown		= iosapic_shutdown_level_irq,
	.irq_enable		= iosapic_enable_level_irq,
	.irq_disable		= iosapic_disable_level_irq,
	.irq_ack		= iosapic_ack_level_irq,
	.irq_mask		= mask_irq,
	.irq_unmask		= iosapic_unmask_level_irq,
	.irq_set_affinity	= iosapic_set_affinity
};
/*
 * Handlers for edge-triggered interrupts.
 */

static unsigned int
iosapic_startup_edge_irq (struct irq_data *data)
{
	unmask_irq(data);
	/*
	 * IOSAPIC simply drops interrupts pended while the
	 * corresponding pin was masked, so we can't know if an
	 * interrupt is pending already.  Let's hope not...
	 */
	return 0;
}

static void
iosapic_ack_edge_irq (struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
}

#define iosapic_enable_edge_irq		unmask_irq
#define iosapic_disable_edge_irq	nop

static struct irq_chip irq_type_iosapic_edge = {
	.name			= "IO-SAPIC-edge",
	.irq_startup		= iosapic_startup_edge_irq,
	.irq_shutdown		= iosapic_disable_edge_irq,
	.irq_enable		= iosapic_enable_edge_irq,
	.irq_disable		= iosapic_disable_edge_irq,
	.irq_ack		= iosapic_ack_edge_irq,
	.irq_mask		= mask_irq,
	.irq_unmask		= unmask_irq,
	.irq_set_affinity	= iosapic_set_affinity
};
static unsigned int
iosapic_version (char __iomem *addr)
{
	/*
	 * The IOSAPIC Version Register returns a 32-bit structure like:
	 *	{
	 *		unsigned int version   : 8;
	 *		unsigned int reserved1 : 8;
	 *		unsigned int max_redir : 8;
	 *		unsigned int reserved2 : 8;
	 *	}
	 */
	return __iosapic_read(addr, IOSAPIC_VERSION);
}
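/*
 * Illustrative helper with a hypothetical name: one way to decode the
 * MAX_REDIR field of the value returned by iosapic_version().  The same
 * "max_redir + 1" computation appears in iosapic_init() below.
 */
static inline unsigned int
iosapic_num_rte_from_version (unsigned int ver)
{
	/* bits 16..23 hold MAX_REDIR, the highest RTE index; add 1 for count */
	return ((ver >> 16) & 0xff) + 1;
}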
static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
{
	int i, irq = -ENOSPC, min_count = -1;
	struct iosapic_intr_info *info;

	/*
	 * shared vectors for edge-triggered interrupts are not
	 * supported yet
	 */
	if (trigger == IOSAPIC_EDGE)
		return -EINVAL;

	for (i = 0; i < NR_IRQS; i++) {
		info = &iosapic_intr_info[i];
		if (info->trigger == trigger && info->polarity == pol &&
		    (info->dmode == IOSAPIC_FIXED ||
		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
		    can_request_irq(i, IRQF_SHARED)) {
			if (min_count == -1 || info->count < min_count) {
				irq = i;
				min_count = info->count;
			}
		}
	}
	return irq;
}
/*
 * If the given vector is already owned by another interrupt, assign a new
 * vector to that interrupt and make the original vector available.
 */
static void __init
iosapic_reassign_vector (int irq)
{
	int new_irq;

	if (iosapic_intr_info[irq].count) {
		new_irq = create_irq();
		if (new_irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		printk(KERN_INFO "Reassigning vector %d to %d\n",
		       irq_to_vector(irq), irq_to_vector(new_irq));
		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
		       sizeof(struct iosapic_intr_info));
		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
		list_move(iosapic_intr_info[irq].rtes.next,
			  &iosapic_intr_info[new_irq].rtes);
		memset(&iosapic_intr_info[irq], 0,
		       sizeof(struct iosapic_intr_info));
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
	}
}
static inline int irq_is_shared (int irq)
{
	return (iosapic_intr_info[irq].count > 1);
}

struct irq_chip*
ia64_native_iosapic_get_irq_chip(unsigned long trigger)
{
	if (trigger == IOSAPIC_EDGE)
		return &irq_type_iosapic_edge;
	else
		return &irq_type_iosapic_level;
}
static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
	       unsigned long polarity, unsigned long trigger)
{
	struct irq_chip *chip, *irq_type;
	int index;
	struct iosapic_rte_info *rte;

	index = find_iosapic(gsi);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi);
		return -ENODEV;
	}

	rte = find_rte(irq, gsi);
	if (!rte) {
		rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
		if (!rte) {
			printk(KERN_WARNING "%s: cannot allocate memory\n",
			       __func__);
			return -ENOMEM;
		}

		rte->iosapic	= &iosapic_lists[index];
		rte->rte_index	= gsi - rte->iosapic->gsi_base;
		rte->refcnt++;
		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}
	else if (rte->refcnt == NO_REF_RTE) {
		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
		if (info->count > 0 &&
		    (info->trigger != trigger || info->polarity != polarity)){
			printk (KERN_WARNING
				"%s: cannot override the interrupt\n",
				__func__);
			return -EINVAL;
		}
		rte->refcnt++;
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}

	iosapic_intr_info[irq].polarity = polarity;
	iosapic_intr_info[irq].dmode    = delivery;
	iosapic_intr_info[irq].trigger  = trigger;

	irq_type = iosapic_get_irq_chip(trigger);

	chip = irq_get_chip(irq);
	if (irq_type != NULL && chip != irq_type) {
		if (chip != &no_irq_chip)
			printk(KERN_WARNING
			       "%s: changing vector %d from %s to %s\n",
			       __func__, irq_to_vector(irq),
			       chip->name, irq_type->name);
		chip = irq_type;
	}
	__irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
					   handle_edge_irq : handle_level_irq,
					   NULL);
	return 0;
}
static unsigned int
get_target_cpu (unsigned int gsi, int irq)
{
#ifdef CONFIG_SMP
	static int cpu = -1;
	extern int cpe_vector;
	cpumask_t domain = irq_to_domain(irq);

	/*
	 * In case of vector shared by multiple RTEs, all RTEs that
	 * share the vector need to use the same destination CPU.
	 */
	if (iosapic_intr_info[irq].count)
		return iosapic_intr_info[irq].dest;

	/*
	 * If the platform supports redirection via XTP, let it
	 * distribute interrupts.
	 */
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return cpu_physical_id(smp_processor_id());

	/*
	 * Some interrupts (ACPI SCI, for instance) are registered
	 * before the BSP is marked as online.
	 */
	if (!cpu_online(smp_processor_id()))
		return cpu_physical_id(smp_processor_id());

#ifdef CONFIG_ACPI
	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
		return get_cpei_target_cpu();
#endif

#ifdef CONFIG_NUMA
	{
		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
		const struct cpumask *cpu_mask;

		iosapic_index = find_iosapic(gsi);
		if (iosapic_index < 0 ||
		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
			goto skip_numa_setup;

		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
		num_cpus = 0;
		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
			if (cpu_online(numa_cpu))
				num_cpus++;
		}

		if (!num_cpus)
			goto skip_numa_setup;

		/* Use irq assignment to distribute across cpus in node */
		cpu_index = irq % num_cpus;

		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
			if (cpu_online(numa_cpu) && i++ >= cpu_index)
				break;

		if (numa_cpu < nr_cpu_ids)
			return cpu_physical_id(numa_cpu);
	}
skip_numa_setup:
#endif
	/*
	 * Otherwise, round-robin interrupt vectors across all the
	 * processors.  (It'd be nice if we could be smarter in the
	 * case of NUMA.)
	 */
	do {
		if (++cpu >= nr_cpu_ids)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
	return cpu_physical_id(smp_processor_id());
#endif
}
static inline unsigned char choose_dmode(void)
{
#ifdef CONFIG_SMP
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return IOSAPIC_LOWEST_PRIORITY;
#endif
	return IOSAPIC_FIXED;
}
/*
 * ACPI can describe IOSAPIC interrupts via static tables and namespace
 * methods.  This provides an interface to register those interrupts and
 * program the IOSAPIC RTE.
 */
int
iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
{
	int irq, mask = 1, err;
	unsigned int dest;
	unsigned long flags;
	struct iosapic_rte_info *rte;
	u32 low32;
	unsigned char dmode;
	struct irq_desc *desc;

	/*
	 * If this GSI has already been registered (i.e., it's a
	 * shared interrupt, or we lost a race to register it),
	 * don't touch the RTE.
	 */
	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	if (irq > 0) {
		rte = find_rte(irq, gsi);
		if (iosapic_intr_info[irq].count == 0) {
			assign_irq_vector(irq);
			dynamic_irq_init(irq);
		} else if (rte->refcnt != NO_REF_RTE) {
			rte->refcnt++;
			goto unlock_iosapic_lock;
		}
	} else
		irq = create_irq();

	/* If vector is running out, we try to find a sharable vector */
	if (irq < 0) {
		irq = iosapic_find_sharable_irq(trigger, polarity);
		if (irq < 0)
			goto unlock_iosapic_lock;
	}

	desc = irq_to_desc(irq);
	raw_spin_lock(&desc->lock);
	dest = get_target_cpu(gsi, irq);
	dmode = choose_dmode();
	err = register_intr(gsi, irq, dmode, polarity, trigger);
	if (err < 0) {
		raw_spin_unlock(&desc->lock);
		irq = err;
		goto unlock_iosapic_lock;
	}

	/*
	 * If the vector is shared and already unmasked for other
	 * interrupt sources, don't mask it.
	 */
	low32 = iosapic_intr_info[irq].low32;
	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
		mask = 0;
	set_rte(gsi, irq, dest, mask);

	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	raw_spin_unlock(&desc->lock);
 unlock_iosapic_lock:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}
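/*
 * A minimal usage sketch for iosapic_register_intr(), roughly as a caller
 * such as the ACPI GSI code might invoke it; the GSI value and the helper
 * name are made up for illustration.
 */
static int __maybe_unused example_register_device_gsi(void)
{
	/* register a hypothetical level-triggered, active-low GSI 19 */
	int irq = iosapic_register_intr(19, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);

	if (irq < 0)
		return irq;	/* no vector/IRQ could be allocated */

	/* a driver would then request_irq() on the returned Linux irq */
	return irq;
}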
void
iosapic_unregister_intr (unsigned int gsi)
{
	unsigned long flags;
	int irq, index;
	u32 low32;
	unsigned long trigger, polarity;
	unsigned int dest;
	struct iosapic_rte_info *rte;

	/*
	 * If the irq associated with the gsi is not found,
	 * iosapic_unregister_intr() is unbalanced. We need to check
	 * this again after getting locks.
	 */
	irq = gsi_to_irq(gsi);
	if (irq < 0) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		return;
	}

	spin_lock_irqsave(&iosapic_lock, flags);
	if ((rte = find_rte(irq, gsi)) == NULL) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		goto out;
	}

	if (--rte->refcnt > 0)
		goto out;

	rte->refcnt = NO_REF_RTE;

	/* Mask the interrupt */
	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);

	iosapic_intr_info[irq].count--;
	index = find_iosapic(gsi);
	iosapic_lists[index].rtes_inuse--;
	WARN_ON(iosapic_lists[index].rtes_inuse < 0);

	trigger  = iosapic_intr_info[irq].trigger;
	polarity = iosapic_intr_info[irq].polarity;
	dest     = iosapic_intr_info[irq].dest;
	printk(KERN_INFO
	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
		/* Clear affinity */
		cpumask_setall(irq_get_irq_data(irq)->affinity);
#endif
		/* Clear the interrupt information */
		iosapic_intr_info[irq].dest = 0;
		iosapic_intr_info[irq].dmode = 0;
		iosapic_intr_info[irq].polarity = 0;
		iosapic_intr_info[irq].trigger = 0;
		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;

		/* Destroy and reserve IRQ */
		destroy_and_reserve_irq(irq);
	}
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
}
/*
 * ACPI calls this when it finds an entry for a platform interrupt.
 */
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
				int iosapic_vector, u16 eid, u16 id,
				unsigned long polarity, unsigned long trigger)
{
	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
	unsigned char delivery;
	int irq, vector, mask = 0;
	unsigned int dest = ((id << 8) | eid) & 0xffff;

	switch (int_type) {
	case ACPI_INTERRUPT_PMI:
		irq = vector = iosapic_vector;
		bind_irq_vector(irq, vector, CPU_MASK_ALL);
		/*
		 * since PMI vector is alloc'd by FW (ACPI) not by kernel,
		 * we need to make sure the vector is available
		 */
		iosapic_reassign_vector(irq);
		delivery = IOSAPIC_PMI;
		break;
	case ACPI_INTERRUPT_INIT:
		irq = create_irq();
		if (irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		vector = irq_to_vector(irq);
		delivery = IOSAPIC_INIT;
		break;
	case ACPI_INTERRUPT_CPEI:
		irq = vector = IA64_CPE_VECTOR;
		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
		delivery = IOSAPIC_FIXED;
		mask = 1;
		break;
	default:
		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
		       int_type);
		return -1;
	}

	register_intr(gsi, irq, delivery, polarity, trigger);

	printk(KERN_INFO
	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
	       " vector %d\n",
	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, mask);
	return vector;
}
/*
 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
 */
void __devinit
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long polarity,
			  unsigned long trigger)
{
	int vector, irq;
	unsigned int dest = cpu_physical_id(smp_processor_id());
	unsigned char dmode;

	irq = vector = isa_irq_to_vector(isa_irq);
	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
	dmode = choose_dmode();
	register_intr(gsi, irq, dmode, polarity, trigger);

	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
	    cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, 1);
}

void __init
ia64_native_iosapic_pcat_compat_init(void)
{
	if (pcat_compat) {
		/*
		 * Disable the compatibility mode interrupts (8259 style),
		 * needs IN/OUT support enabled.
		 */
		printk(KERN_INFO
		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
		       __func__);
		outb(0xff, 0xA1);
		outb(0xff, 0x21);
	}
}
void __init
iosapic_system_init (int system_pcat_compat)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; ++irq) {
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		/* mark as unused */
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);

		iosapic_intr_info[irq].count = 0;
	}

	pcat_compat = system_pcat_compat;
	if (pcat_compat)
		iosapic_pcat_compat_init();
}

static inline int
iosapic_alloc (void)
{
	int index;

	for (index = 0; index < NR_IOSAPICS; index++)
		if (!iosapic_lists[index].addr)
			return index;

	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
	return -1;
}

static inline void
iosapic_free (int index)
{
	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
}
static inline int
iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
{
	int index;
	unsigned int gsi_end, base, end;

	/* check gsi range */
	gsi_end = gsi_base + ((ver >> 16) & 0xff);
	for (index = 0; index < NR_IOSAPICS; index++) {
		if (!iosapic_lists[index].addr)
			continue;

		base = iosapic_lists[index].gsi_base;
		end  = base + iosapic_lists[index].num_rte - 1;

		if (gsi_end < base || end < gsi_base)
			continue;	/* OK */

		return -EBUSY;
	}
	return 0;
}
int __devinit
iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
{
	int num_rte, err, index;
	unsigned int isa_irq, ver;
	char __iomem *addr;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index >= 0) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -EBUSY;
	}

	addr = ioremap(phys_addr, 0);
	if (addr == NULL) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -ENOMEM;
	}
	ver = iosapic_version(addr);
	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
		iounmap(addr);
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return err;
	}

	/*
	 * The MAX_REDIR register holds the highest input pin number
	 * (starting from 0).  We add 1 so that we can use it for the
	 * number of pins (= RTEs).
	 */
	num_rte = ((ver >> 16) & 0xff) + 1;

	index = iosapic_alloc();
	iosapic_lists[index].addr = addr;
	iosapic_lists[index].gsi_base = gsi_base;
	iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA
	iosapic_lists[index].node = MAX_NUMNODES;
#endif
	spin_lock_init(&iosapic_lists[index].lock);
	spin_unlock_irqrestore(&iosapic_lock, flags);

	if ((gsi_base == 0) && pcat_compat) {
		/*
		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
		 * these may get reprogrammed later on with data from the ACPI
		 * Interrupt Source Override table.
		 */
		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
			iosapic_override_isa_irq(isa_irq, isa_irq,
						 IOSAPIC_POL_HIGH,
						 IOSAPIC_EDGE);
	}
	return 0;
}
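/*
 * A minimal sketch of how platform setup code could bring one IOSAPIC under
 * control of this driver.  The physical address, GSI base, and helper name
 * are invented for illustration; on real systems both values come from the
 * ACPI MADT (I/O SAPIC entries).
 */
static int __init __maybe_unused example_probe_one_iosapic(void)
{
	unsigned long phys_addr = 0xfec00000UL;	/* hypothetical MMIO base */
	unsigned int gsi_base = 0;		/* hypothetical GSI base */

	/* maps the registers, reads MAX_REDIR and records the GSI range */
	return iosapic_init(phys_addr, gsi_base);
}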
#ifdef CONFIG_HOTPLUG
int
iosapic_remove (unsigned int gsi_base)
{
	int index, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
		       __func__, gsi_base);
		goto out;
	}

	if (iosapic_lists[index].rtes_inuse) {
		err = -EBUSY;
		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
		       __func__, gsi_base);
		goto out;
	}

	iounmap(iosapic_lists[index].addr);
	iosapic_free(index);
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return err;
}
#endif /* CONFIG_HOTPLUG */
#ifdef CONFIG_NUMA
void __devinit
map_iosapic_to_node(unsigned int gsi_base, int node)
{
	int index;

	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi_base);
		return;
	}
	iosapic_lists[index].node = node;
	return;
}
#endif