/* arch/powerpc/platforms/wsp/ics.c (scraper artifacts — size banner and line-number digit runs — removed) */
  1. /*
  2. * Copyright 2008-2011 IBM Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/cpu.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/irq.h>
  13. #include <linux/kernel.h>
  14. #include <linux/msi.h>
  15. #include <linux/of.h>
  16. #include <linux/slab.h>
  17. #include <linux/smp.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/types.h>
  20. #include <asm/io.h>
  21. #include <asm/irq.h>
  22. #include <asm/xics.h>
  23. #include "wsp.h"
  24. #include "ics.h"
/* WSP ICS */

/*
 * Per-instance state for one WSP interrupt-source controller.
 */
struct wsp_ics {
	struct ics ics;			/* generic XICS source-controller base */
	struct device_node *dn;		/* device-tree node for this ICS */
	void __iomem *regs;		/* mapped register block */
	spinlock_t lock;		/* protects indirect reg access + bitmap */
	unsigned long *bitmap;		/* allocation bitmap, one bit per hwirq */
	u32 chip_id;			/* NodeID this ICS sits on (see below) */
	u32 lsi_base;			/* first LSI hw number */
	u32 lsi_count;			/* number of LSIs */
	u64 hwirq_start;		/* first hwirq owned by this ICS */
	u64 count;			/* number of hwirqs owned */
#ifdef CONFIG_SMP
	int *hwirq_cpu_map;		/* cached hard cpu id per hwirq */
#endif
};

#define to_wsp_ics(ics)	container_of(ics, struct wsp_ics, ics)

/* Register offsets within the mapped ICS register block */
#define INT_SRC_LAYER_BUID_REG(base)	((base) + 0x00)
#define IODA_TBL_ADDR_REG(base)		((base) + 0x18)
#define IODA_TBL_DATA_REG(base)		((base) + 0x20)
#define XIVE_UPDATE_REG(base)		((base) + 0x28)
#define ICS_INT_CAPS_REG(base)		((base) + 0x30)

/* Control bits for the indirect IODA table-access registers */
#define TBL_AUTO_INCREMENT	((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST		(1UL << 48)
#define TBL_SELECT_XIVT		(1UL << 49)

#define IODA_IRQ(irq)		((irq) & (0x7FFULL))	/* HRM 5.1.3.4 */

/* XIST per-interrupt state bits */
#define XIST_REQUIRED		0x8
#define XIST_REJECTED		0x4
#define XIST_PRESENTED		0x2
#define XIST_PENDING		0x1

/* Field positions within a XIVE entry */
#define XIVE_SERVER_SHIFT	42
#define XIVE_SERVER_MASK	0xFFFFULL
#define XIVE_PRIORITY_MASK	0xFFULL
#define XIVE_PRIORITY_SHIFT	32
#define XIVE_WRITE_ENABLE	(1ULL << 63)

/*
 * The docs refer to a 6 bit field called ChipID, which consists of a
 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 * so we ignore it, and every where we use "chip id" in this code we
 * mean the NodeID.
 */
#define WSP_ICS_CHIP_SHIFT	17

/* All ICS instances found at init time, and how many there are */
static struct wsp_ics *ics_list;
static int num_ics;
/* ICS Source controller accessors */

/*
 * Read the XIVE (interrupt vector entry) for @irq via the indirect
 * IODA table-access register pair. The address/data registers are a
 * shared resource, hence the lock around the two MMIO accesses.
 */
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
	unsigned long flags;
	u64 xive;

	spin_lock_irqsave(&ics->lock, flags);
	/* Select the XIVT entry for this irq, then read it back */
	out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
	xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
	spin_unlock_irqrestore(&ics->lock, flags);

	return xive;
}
/*
 * Write a XIVE entry. The XIVE_UPDATE register carries the target irq
 * number in the entry's address bits, so this is a single atomic MMIO
 * store and needs no lock against the indirect table registers.
 */
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
	xive &= ~XIVE_ADDR_MASK;
	xive |= (irq & XIVE_ADDR_MASK);
	xive |= XIVE_WRITE_ENABLE;

	out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}
  87. static u64 xive_set_server(u64 xive, unsigned int server)
  88. {
  89. u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
  90. xive &= mask;
  91. xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
  92. return xive;
  93. }
  94. static u64 xive_set_priority(u64 xive, unsigned int priority)
  95. {
  96. u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
  97. xive &= mask;
  98. xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
  99. return xive;
  100. }
  101. #ifdef CONFIG_SMP
  102. /* Find logical CPUs within mask on a given chip and store result in ret */
  103. void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
  104. {
  105. int cpu, chip;
  106. struct device_node *cpu_dn, *dn;
  107. const u32 *prop;
  108. cpumask_clear(ret);
  109. for_each_cpu(cpu, mask) {
  110. cpu_dn = of_get_cpu_node(cpu, NULL);
  111. if (!cpu_dn)
  112. continue;
  113. prop = of_get_property(cpu_dn, "at-node", NULL);
  114. if (!prop) {
  115. of_node_put(cpu_dn);
  116. continue;
  117. }
  118. dn = of_find_node_by_phandle(*prop);
  119. of_node_put(cpu_dn);
  120. chip = wsp_get_chip_id(dn);
  121. if (chip == chip_id)
  122. cpumask_set_cpu(cpu, ret);
  123. of_node_put(dn);
  124. }
  125. }
  126. /* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
  127. static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
  128. const cpumask_t *affinity)
  129. {
  130. cpumask_var_t avail, newmask;
  131. int ret = -ENOMEM, cpu, cpu_rover = 0, target;
  132. int index = hwirq - ics->hwirq_start;
  133. unsigned int nodeid;
  134. BUG_ON(index < 0 || index >= ics->count);
  135. if (!ics->hwirq_cpu_map)
  136. return -ENOMEM;
  137. if (!distribute_irqs) {
  138. ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
  139. return 0;
  140. }
  141. /* Allocate needed CPU masks */
  142. if (!alloc_cpumask_var(&avail, GFP_KERNEL))
  143. goto ret;
  144. if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
  145. goto freeavail;
  146. /* Find PBus attached to the source of this IRQ */
  147. nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
  148. /* Find CPUs that could handle this IRQ */
  149. if (affinity)
  150. cpumask_and(avail, cpu_online_mask, affinity);
  151. else
  152. cpumask_copy(avail, cpu_online_mask);
  153. /* Narrow selection down to logical CPUs on the same chip */
  154. cpus_on_chip(nodeid, avail, newmask);
  155. /* Ensure we haven't narrowed it down to 0 */
  156. if (unlikely(cpumask_empty(newmask))) {
  157. if (unlikely(cpumask_empty(avail))) {
  158. ret = -1;
  159. goto out;
  160. }
  161. cpumask_copy(newmask, avail);
  162. }
  163. /* Choose a CPU out of those we narrowed it down to in round robin */
  164. target = hwirq % cpumask_weight(newmask);
  165. for_each_cpu(cpu, newmask) {
  166. if (cpu_rover++ >= target) {
  167. ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
  168. ret = 0;
  169. goto out;
  170. }
  171. }
  172. /* Shouldn't happen */
  173. WARN_ON(1);
  174. out:
  175. free_cpumask_var(newmask);
  176. freeavail:
  177. free_cpumask_var(avail);
  178. ret:
  179. if (ret < 0) {
  180. ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
  181. pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
  182. hwirq, ics->hwirq_cpu_map[index]);
  183. }
  184. return ret;
  185. }
  186. static void alloc_irq_map(struct wsp_ics *ics)
  187. {
  188. int i;
  189. ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
  190. if (!ics->hwirq_cpu_map) {
  191. pr_warning("Allocate hwirq_cpu_map failed, "
  192. "IRQ balancing disabled\n");
  193. return;
  194. }
  195. for (i=0; i < ics->count; i++)
  196. ics->hwirq_cpu_map[i] = xics_default_server;
  197. }
  198. static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
  199. {
  200. int index = hwirq - ics->hwirq_start;
  201. BUG_ON(index < 0 || index >= ics->count);
  202. if (!ics->hwirq_cpu_map)
  203. return xics_default_server;
  204. return ics->hwirq_cpu_map[index];
  205. }
#else /* !CONFIG_SMP */

/* UP build: no per-CPU routing; everything goes to the default server. */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	return 0;
}

static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
	return xics_default_server;
}

static void alloc_irq_map(struct wsp_ics *ics) { }
#endif
/*
 * irq_chip unmask: route the interrupt to its cached server and set the
 * default priority. IPIs and the spurious vector are not sourced by
 * this controller, so they are ignored here.
 */
static void wsp_chip_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int server;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return;

	server = get_irq_server(ics, hw_irq);

	/* read-modify-write the XIVE: keep other fields intact */
	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, server);
	xive = xive_set_priority(xive, DEFAULT_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
  235. static unsigned int wsp_chip_startup(struct irq_data *d)
  236. {
  237. /* unmask it */
  238. wsp_chip_unmask_irq(d);
  239. return 0;
  240. }
/*
 * Effectively disable a real (non-IPI) interrupt by routing it to the
 * default server at LOWEST_PRIORITY.
 */
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
{
	u64 xive;

	if (hw_irq == XICS_IPI)
		return;

	if (WARN_ON(!ics))
		return;

	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, xics_default_server);
	xive = xive_set_priority(xive, LOWEST_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
  253. static void wsp_chip_mask_irq(struct irq_data *d)
  254. {
  255. unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
  256. struct wsp_ics *ics = d->chip_data;
  257. if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
  258. return;
  259. wsp_mask_real_irq(hw_irq, ics);
  260. }
/*
 * irq_chip set_affinity hook: pick a destination CPU for @d out of
 * @cpumask via the hwirq routing cache, then rewrite the XIVE's server
 * field. Returns 0 on success, -1 on any failure.
 */
static int wsp_chip_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int ret;
	u64 xive;

	/* IPI / spurious vectors have no per-source routing */
	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return -1;

	xive = wsp_ics_get_xive(ics, hw_irq);

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	ret = cache_hwirq_map(ics, hw_irq, cpumask);
	if (ret == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
		return -1;
	} else if (ret == -ENOMEM) {
		pr_warning("%s: Out of memory\n", __func__);
		return -1;
	}

	/* cache succeeded: commit the newly chosen server to hardware */
	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
	wsp_ics_set_xive(ics, hw_irq, xive);

	return 0;
}
/*
 * irq_chip for WSP ICS sources. .irq_eoi is patched in at boot by
 * wsp_init_irq() once the active ICP backend is known.
 */
static struct irq_chip wsp_irq_chip = {
	.name = "WSP ICS",
	.irq_startup = wsp_chip_startup,
	.irq_mask = wsp_chip_mask_irq,
	.irq_unmask = wsp_chip_unmask_irq,
	.irq_set_affinity = wsp_chip_set_affinity
};
  300. static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
  301. {
  302. /* All ICSs in the system implement a global irq number space,
  303. * so match against them all. */
  304. return of_device_is_compatible(dn, "ibm,ppc-xics");
  305. }
  306. static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
  307. {
  308. if (hwirq >= wsp_ics->hwirq_start &&
  309. hwirq < wsp_ics->hwirq_start + wsp_ics->count)
  310. return 1;
  311. return 0;
  312. }
/*
 * ics callback: bind a freshly-created virq to this controller and
 * reserve the matching bit in the allocation bitmap.
 * Returns -ENOENT if the hwirq is not owned by this ICS.
 */
static int wsp_ics_map(struct ics *ics, unsigned int virq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);
	unsigned int hw_irq = virq_to_hw(virq);
	unsigned long flags;

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return -ENOENT;

	irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);

	irq_set_chip_data(virq, wsp_ics);

	spin_lock_irqsave(&wsp_ics->lock, flags);
	/* NOTE(review): return value ignored — a double map goes unnoticed */
	bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
	spin_unlock_irqrestore(&wsp_ics->lock, flags);

	return 0;
}
  327. static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
  328. {
  329. struct wsp_ics *wsp_ics = to_wsp_ics(ics);
  330. if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
  331. return;
  332. pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
  333. wsp_mask_real_irq(hw_irq, wsp_ics);
  334. }
  335. static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
  336. {
  337. struct wsp_ics *wsp_ics = to_wsp_ics(ics);
  338. if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
  339. return -ENOENT;
  340. return get_irq_server(wsp_ics, hw_irq);
  341. }
  342. /* HW Number allocation API */
  343. static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
  344. {
  345. struct device_node *iparent;
  346. int i;
  347. iparent = of_irq_find_parent(dn);
  348. if (!iparent) {
  349. pr_err("wsp_ics: Failed to find interrupt parent!\n");
  350. return NULL;
  351. }
  352. for(i = 0; i < num_ics; i++) {
  353. if(ics_list[i].dn == iparent)
  354. break;
  355. }
  356. if (i >= num_ics) {
  357. pr_err("wsp_ics: Unable to find parent bitmap!\n");
  358. return NULL;
  359. }
  360. return &ics_list[i];
  361. }
/*
 * Allocate @num consecutive hwirqs (rounded up to a power of two) from
 * the ICS that is @dn's interrupt parent.
 * Returns the first global hw number on success, or a negative errno.
 */
int wsp_ics_alloc_irq(struct device_node *dn, int num)
{
	struct wsp_ics *ics;
	int order, offset;

	ics = wsp_ics_find_dn_ics(dn);
	if (!ics)
		return -ENODEV;

	/* Fast, but overly strict if num isn't a power of two */
	order = get_count_order(num);

	spin_lock_irq(&ics->lock);
	offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
	spin_unlock_irq(&ics->lock);

	if (offset < 0)
		return offset;

	/* bitmap offsets are relative to the ICS's first hwirq */
	return offset + ics->hwirq_start;
}
  378. void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
  379. {
  380. struct wsp_ics *ics;
  381. ics = wsp_ics_find_dn_ics(dn);
  382. if (WARN_ON(!ics))
  383. return;
  384. spin_lock_irq(&ics->lock);
  385. bitmap_release_region(ics->bitmap, irq, 0);
  386. spin_unlock_irq(&ics->lock);
  387. }
/* Initialisation */

/*
 * Build the hwirq allocation bitmap for @ics from the device tree.
 * All bits start reserved; the "available-ranges" property (pairs of
 * <start count> global hw numbers) releases the allocatable ones, and
 * the LSI block is re-reserved at the end.
 * Returns 0 on success — including a missing or malformed property,
 * which is deliberately only reported (see FIXMEs) — and -ENOMEM if
 * the bitmap cannot be allocated.
 */
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
				       struct device_node *dn)
{
	int len, i, j, size;
	u32 start, count;
	const u32 *p;

	size = BITS_TO_LONGS(ics->count) * sizeof(long);
	ics->bitmap = kzalloc(size, GFP_KERNEL);
	if (!ics->bitmap) {
		pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
		return -ENOMEM;
	}

	spin_lock_init(&ics->lock);

	p = of_get_property(dn, "available-ranges", &len);
	if (!p || !len) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: No available-ranges defined for %s\n",
		       dn->full_name);
		return 0;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: Invalid available-ranges for %s\n",
		       dn->full_name);
		return 0;
	}

	/* Start with everything reserved, then open up the listed ranges */
	bitmap_fill(ics->bitmap, ics->count);

	for (i = 0; i < len / sizeof(u32); i += 2) {
		start = of_read_number(p + i, 1);
		count = of_read_number(p + i + 1, 1);

		pr_devel("%s: start: %d count: %d\n", __func__, start, count);

		/* Ranges are global hw numbers; bounds-check against ours */
		if ((start + count) > (ics->hwirq_start + ics->count) ||
		    start < ics->hwirq_start) {
			pr_err("wsp_ics: Invalid range! -> %d to %d\n",
			       start, start + count);
			break;
		}

		for (j = 0; j < count; j++)
			bitmap_release_region(ics->bitmap,
				(start + j) - ics->hwirq_start, 0);
	}

	/* Ensure LSIs are not available for allocation */
	bitmap_allocate_region(ics->bitmap, ics->lsi_base,
			       get_count_order(ics->lsi_count));

	return 0;
}
  435. static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
  436. {
  437. u32 lsi_buid, msi_buid, msi_base, msi_count;
  438. void __iomem *regs;
  439. const u32 *p;
  440. int rc, len, i;
  441. u64 caps, buid;
  442. p = of_get_property(dn, "interrupt-ranges", &len);
  443. if (!p || len < (2 * sizeof(u32))) {
  444. pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
  445. dn->full_name);
  446. return -ENOENT;
  447. }
  448. if (len > (2 * sizeof(u32))) {
  449. pr_err("wsp_ics: Multiple ics ranges not supported.\n");
  450. return -EINVAL;
  451. }
  452. regs = of_iomap(dn, 0);
  453. if (!regs) {
  454. pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
  455. return -ENXIO;
  456. }
  457. ics->hwirq_start = of_read_number(p, 1);
  458. ics->count = of_read_number(p + 1, 1);
  459. ics->regs = regs;
  460. ics->chip_id = wsp_get_chip_id(dn);
  461. if (WARN_ON(ics->chip_id < 0))
  462. ics->chip_id = 0;
  463. /* Get some informations about the critter */
  464. caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
  465. buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
  466. ics->lsi_count = caps >> 56;
  467. msi_count = (caps >> 44) & 0x7ff;
  468. /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
  469. * rest is mixed in the interrupt number. We store the whole
  470. * thing though
  471. */
  472. lsi_buid = (buid >> 48) & 0x1ff;
  473. ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
  474. msi_buid = (buid >> 37) & 0x7;
  475. msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
  476. pr_info("wsp_ics: Found %s\n", dn->full_name);
  477. pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
  478. ics->hwirq_start, ics->hwirq_start + ics->count - 1);
  479. pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
  480. ics->lsi_count, ics->lsi_base,
  481. ics->lsi_base + ics->lsi_count - 1);
  482. pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
  483. msi_count, msi_base,
  484. msi_base + msi_count - 1);
  485. /* Let's check the HW config is sane */
  486. if (ics->lsi_base < ics->hwirq_start ||
  487. (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
  488. pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
  489. if (msi_base < ics->hwirq_start ||
  490. (msi_base + msi_count) > (ics->hwirq_start + ics->count))
  491. pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
  492. /* We don't check for overlap between LSI and MSI, which will happen
  493. * if we use the same BUID, I'm not sure yet how legit that is.
  494. */
  495. rc = wsp_ics_bitmap_setup(ics, dn);
  496. if (rc) {
  497. iounmap(regs);
  498. return rc;
  499. }
  500. ics->dn = of_node_get(dn);
  501. alloc_irq_map(ics);
  502. for(i = 0; i < ics->count; i++)
  503. wsp_mask_real_irq(ics->hwirq_start + i, ics);
  504. ics->ics.map = wsp_ics_map;
  505. ics->ics.mask_unknown = wsp_ics_mask_unknown;
  506. ics->ics.get_server = wsp_ics_get_server;
  507. ics->ics.host_match = wsp_ics_host_match;
  508. xics_register_ics(&ics->ics);
  509. return 0;
  510. }
  511. static void __init wsp_ics_set_default_server(void)
  512. {
  513. struct device_node *np;
  514. u32 hwid;
  515. /* Find the server number for the boot cpu. */
  516. np = of_get_cpu_node(boot_cpuid, NULL);
  517. BUG_ON(!np);
  518. hwid = get_hard_smp_processor_id(boot_cpuid);
  519. pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
  520. xics_default_server = hwid;
  521. of_node_put(np);
  522. }
/*
 * Find and set up every WSP ICS in the device tree.
 * NOTE(review): the count pass matches the generic "ibm,ppc-xics"
 * compatible while the setup pass matches "ibm,wsp-xics" — presumably
 * every WSP node carries both compatibles; if not, found != num_ics
 * and we fail. ics_list is also not freed on that error path
 * (boot-time one-shot, so the leak is cosmetic).
 */
static int __init wsp_ics_init(void)
{
	struct device_node *dn;
	struct wsp_ics *ics;
	int rc, found;

	wsp_ics_set_default_server();

	found = 0;
	for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
		found++;

	if (found == 0) {
		pr_err("wsp_ics: No ICS's found!\n");
		return -ENODEV;
	}

	ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
	if (!ics_list) {
		pr_err("wsp_ics: No memory for structs.\n");
		return -ENOMEM;
	}

	num_ics = 0;
	ics = ics_list;
	for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
		rc = wsp_ics_setup(ics, dn);
		if (rc == 0) {
			ics++;
			num_ics++;
		}
	}

	if (found != num_ics) {
		pr_err("wsp_ics: Failed setting up %d ICS's\n",
		       found - num_ics);
		return -1;
	}

	return 0;
}
/*
 * Platform IRQ init: set up all source controllers, then the XICS
 * presentation layer, and wire our EOI to the active ICP backend.
 */
void __init wsp_init_irq(void)
{
	wsp_ics_init();
	xics_init();

	/* We need to patch our irq chip's EOI to point to the right ICP */
	wsp_irq_chip.irq_eoi = icp_ops->eoi;
}
#ifdef CONFIG_PCI_MSI

/* Unmask at the source first, then at the MSI capability level. */
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
	wsp_chip_unmask_irq(d);
	unmask_msi_irq(d);
}

static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
	wsp_ics_msi_unmask_irq(d);
	return 0;
}

/* Mask in the opposite order: MSI capability first, then the source. */
static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
	mask_msi_irq(d);
	wsp_chip_mask_irq(d);
}

/*
 * We forward through wsp_irq_chip because its default EOI handling is
 * reassigned at boot in wsp_init_irq() above.
 */
static void wsp_ics_eoi(struct irq_data *data)
{
	wsp_irq_chip.irq_eoi(data);
}

/* irq_chip used for MSI-backed sources; shares affinity with wsp_irq_chip */
static struct irq_chip wsp_ics_msi = {
	.name = "WSP ICS MSI",
	.irq_startup = wsp_ics_msi_startup,
	.irq_mask = wsp_ics_msi_mask_irq,
	.irq_unmask = wsp_ics_msi_unmask_irq,
	.irq_eoi = wsp_ics_eoi,
	.irq_set_affinity = wsp_chip_set_affinity
};

/* Switch @irq to the MSI irq_chip */
void wsp_ics_set_msi_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_ics_msi);
}

/* Switch @irq back to the standard irq_chip */
void wsp_ics_set_std_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_irq_chip);
}

#endif /* CONFIG_PCI_MSI */