irq_64.c

/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}
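
/* In effect, bucket_get_irq(__pa(bucket)) is the bypass-ASI equivalent
 * of reading bucket->__irq through the kernel's normal cacheable
 * mapping, and likewise for the __irq_chain_pa helpers; the handlers
 * below always pass bucket physical addresses, so only ASI_PHYS_USE_EC
 * accesses ever touch these structures.
 */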

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}
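
/* irq_alloc() hands out virtual IRQ numbers starting at 1; 0 means the
 * table is full.  The BUILD_BUG_ON keeps NR_IRQS below 256 so the result
 * always fits in the unsigned char return type.  Callers in this file
 * use it as, for example, irq_alloc(0, ino) for sun4u/sysino interrupts
 * and irq_alloc(devhandle, devino) for cookie-based sun4v virtual
 * interrupts.
 */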

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, " Non-maskable interrupts\n");

	return 0;
}
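
/* On a two-CPU machine the function above would add a line to
 * /proc/interrupts along these lines (counter values illustrative only):
 *
 *	NMI:        142        137  Non-maskable interrupts
 */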

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
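
/* The target ID lands in whichever IMAP field the bus type uses: UPA
 * systems (and Starfire, after translation) take the cpuid directly,
 * JBUS parts (Jalapeno/Serrano) use the narrower JBUS TID field, and
 * Safari (Cheetah) splits the cpuid into a 5-bit agent ID and a 5-bit
 * node ID.  For example, cpuid 37 on Safari gives a = 37 & 0x1f = 5 and
 * n = (37 >> 5) & 0x1f = 1.
 */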

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif
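
/* irq_choose_cpu() picks the CPU an interrupt is targeted at: with the
 * default "all online CPUs" affinity it falls back to the map_to_cpu()
 * distribution from cpumap.c, otherwise it takes the first online CPU
 * in the requested mask.  On UP kernels it collapses to the physical
 * processor id of the only CPU.
 */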

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
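
/* Three irq_chip flavors are defined above: "sun4u" programs IMAP/ICLR
 * registers directly, "sun4v" goes through the hypervisor's sysino
 * interrupt calls (sun4v_intr_*), and "vsun4v" handles cookie-based
 * virtual interrupts identified by a (dev_handle, dev_ino) pair via the
 * sun4v_vintr_* calls.  All three rely on EOI (ICLR_IDLE or
 * HV_INTR_STATE_IDLE) rather than mask/unmask to re-arm the interrupt.
 */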

static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}
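
/* A pre-handler installed this way runs from the preflow hook, i.e.
 * just before handle_fasteoi_irq() invokes the device's handlers, and
 * receives the INO plus the two opaque arguments registered here.  A
 * caller-supplied my_pre_handler (hypothetical name) with the
 * (unsigned int, void *, void *) signature would be hooked up as:
 *
 *	irq_install_pre_handler(irq, my_pre_handler, arg1, arg2);
 */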

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}
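
/* build_irq() is the sun4u path: a bus controller driver reads the INO
 * out of the IMAP register, indexes ivector_table[], and gets back (or
 * allocates) the virtual IRQ bound to the sun4u_irq chip.  A typical
 * caller would look roughly like this (register offsets are purely
 * illustrative):
 *
 *	unsigned long imap = reg_base + 0x1000UL;	// interrupt map reg
 *	unsigned long iclr = reg_base + 0x1400UL;	// interrupt clear reg
 *	unsigned int irq = build_irq(0, iclr, imap);
 *	err = request_irq(irq, my_handler, 0, "mydev", dev);
 */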

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
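
/* On sun4v the hypervisor translates a (devhandle, devino) pair into a
 * flat "sysino" namespace, so sun4v_build_irq() can reuse the same
 * ivector_table[] indexing scheme that the sun4u code uses with raw
 * INOs; only the irq_chip, and therefore the enable/disable/EOI
 * mechanism, differs.
 */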

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}
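
/* Cookie-based virtual interrupts allocate their ino_bucket dynamically
 * instead of indexing ivector_table[].  The cookie handed to the
 * hypervisor is ~__pa(bucket); complementing it again at trap time
 * recovers the bucket's physical address, and the set upper bits let
 * the device mondo code distinguish a cookie from a small sysino value.
 * Because IRQ_NOAUTOEN is set, an LDC user has to enable the interrupt
 * itself (e.g. enable_irq(irq)) once its own locking is in place.
 */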

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
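
/* The interrupt vector trap code queues work by chaining ino_bucket
 * physical addresses onto the per-cpu irq_worklist_pa list.  The inline
 * assembly above runs with PSTATE_IE cleared while it loads the list
 * head and stores zero back, so handler_irq() walks a private snapshot
 * of the chain while new vectors land on the now-empty list.  Roughly:
 *
 *	disable interrupts;
 *	bucket_pa = *irq_work_pa(smp_processor_id());
 *	*irq_work_pa(smp_processor_id()) = 0;
 *	restore the previous interrupt state;
 *
 * done as one uninterruptible sequence instead of via local_irq_save().
 */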

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
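
/* Softirqs run on a separate per-cpu stack.  The pointer arithmetic
 * above parks %sp near the top of that stack: THREAD_SIZE reaches the
 * end of the allocation, 192 bytes are left for the callee's minimal
 * frame (register window save area plus argument slots), and STACK_BIAS
 * is the usual sparc64 offset between %sp and the real frame address.
 * set_hardirq_stack()/restore_hardirq_stack() from kstack.h perform the
 * analogous switch for hard interrupts in handler_irq() above.
 */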

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a different
	 * tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it already. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
	"	mov	0x40, %%g2\n"
	"	ldxa	[%%g0] %0, %%g1\n"
	"	ldxa	[%%g2] %1, %%g1\n"
	"	stxa	%%g0, [%%g0] %0\n"
	"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
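
/* Each mondo queue entry is 64 bytes, and a queue's qmask is its size in
 * bytes minus one, so (qmask + 1) / 64 converts the mask into the entry
 * count that the sun4v_cpu_qconf() hypervisor call expects.  For example,
 * an 8KB device mondo queue has qmask 0x1fff and therefore 128 entries.
 */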

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
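
/* The alignment requirement is satisfied for free here: __get_free_pages()
 * hands back a block of 2^order pages that is naturally aligned to its own
 * size, so a queue rounded up to, say, 8KB comes back on an 8KB boundary.
 */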

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
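
/* Layout of the per-cpu mondo send page: the first 64 bytes hold the
 * mondo data block and the CPU id list starts at offset 64, one u16 per
 * target CPU.  The BUILD_BUG_ON above guarantees the whole list still
 * fits in the remainder of the page for the configured NR_CPUS.
 */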

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}