irq.c

/*
 * Copyright (C) 2003, Axis Communications AB.
 */

#include <asm/irq.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>

#define CPU_FIXED -1

/* IRQ masks (refer to comment for crisv32_do_multiple) */
#if TIMER0_INTR_VECT - FIRST_IRQ < 32
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
#undef TIMER_VECT1
#else
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
#define TIMER_VECT1
#endif
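
/*
 * When there are more than 32 real IRQs the interrupt vector is split
 * across two 32-bit mask/status registers.  TIMER_VECT1 records that the
 * timer bit lives in the second register, so crisv32_do_multiple() below
 * knows which word of the masked vector to test for the timer.
 */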

#ifdef CONFIG_ETRAX_KGDB
#if defined(CONFIG_ETRAX_KGDB_PORT0)
#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
#endif
#endif

DEFINE_SPINLOCK(irq_lock);

struct cris_irq_allocation
{
        int cpu; /* The CPU to which the IRQ is currently allocated. */
        cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
};

struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
        { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };
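
/*
 * By default every IRQ is allocated to CPU 0 but may be handled by any
 * CPU in its mask; set_affinity_crisv32_irq() updates the mask and
 * irq_cpu() picks a new CPU the next time the IRQ is unblocked.
 */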

static unsigned long irq_regs[NR_CPUS] =
{
        regi_irq,
};

/* Number of 32-bit mask/status registers needed to cover all real IRQs. */
#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];

/* From irq.c. */
extern void weird_irq(void);

/* From entry.S. */
extern void system_call(void);
extern void nmi_interrupt(void);
extern void multiple_interrupt(void);
extern void gdb_handle_exception(void);
extern void i_mmu_refill(void);
extern void i_mmu_invalid(void);
extern void i_mmu_access(void);
extern void i_mmu_execute(void);
extern void d_mmu_refill(void);
extern void d_mmu_invalid(void);
extern void d_mmu_access(void);
extern void d_mmu_write(void);

/* From kgdb.c. */
extern void kgdb_init(void);
extern void breakpoint(void);

/* From traps.c. */
extern void breakh_BUG(void);

/*
 * Build the IRQ handler stubs using macros from irq.h.
 */
#ifdef CONFIG_CRIS_MACH_ARTPEC3
BUILD_TIMER_IRQ(0x31, 0)
#else
BUILD_IRQ(0x31)
#endif
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
#ifdef CONFIG_ETRAXFS
BUILD_TIMER_IRQ(0x4b, 0)
#else
BUILD_IRQ(0x4b)
#endif
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif
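
/*
 * Note: the timer vector (0x31 on ARTPEC-3, 0x4b on ETRAX FS) is built
 * with BUILD_TIMER_IRQ rather than BUILD_IRQ; per the comment in
 * crisv32_do_IRQ() this is presumably so the timer stub enters the IRQ
 * path without blocking the vector, avoiding watchdog starvation.
 */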

/* Pointers to the low-level handlers. */
static void (*interrupt[MACH_IRQS])(void) = {
        IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
        IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
        IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
        IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
        IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
        IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
        IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
        IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
        IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
        IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
        IRQ0x4f_interrupt, IRQ0x50_interrupt,
#if MACH_IRQS > 32
        IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
        IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
        IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
        IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
        IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
        IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
        IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
        IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
        IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
        IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
        IRQ0x6f_interrupt, IRQ0x70_interrupt,
#endif
};
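
/*
 * The table above is indexed by hardware vector number minus FIRST_IRQ
 * (0x31 is the first entry); init_IRQ() installs each stub into the
 * exception vector table via set_exception_vector().
 */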

void
block_irq(int irq, int cpu)
{
        int intr_mask;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);

        /* Remember: 1 = let through, 0 = block. */
        if (irq - FIRST_IRQ < 32) {
                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
                        rw_mask, 0);
                intr_mask &= ~(1 << (irq - FIRST_IRQ));
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
                        0, intr_mask);
        } else {
                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
                        rw_mask, 1);
                intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
                        1, intr_mask);
        }
        spin_unlock_irqrestore(&irq_lock, flags);
}

void
unblock_irq(int irq, int cpu)
{
        int intr_mask;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);

        /* Remember: 1 = let through, 0 = block. */
        if (irq - FIRST_IRQ < 32) {
                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
                        rw_mask, 0);
                intr_mask |= (1 << (irq - FIRST_IRQ));
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
                        0, intr_mask);
        } else {
                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
                        rw_mask, 1);
                intr_mask |= (1 << (irq - FIRST_IRQ - 32));
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
                        1, intr_mask);
        }
        spin_unlock_irqrestore(&irq_lock, flags);
}

/* Find out which CPU the irq should be allocated to. */
static int irq_cpu(int irq)
{
        int cpu;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        cpu = irq_allocations[irq - FIRST_IRQ].cpu;

        /* Fixed interrupts stay on the local CPU. */
        if (cpu == CPU_FIXED) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return smp_processor_id();
        }

        /* Let the interrupt stay if possible. */
        if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
                goto out;

        /* IRQ must be moved to another CPU. */
        cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
        irq_allocations[irq - FIRST_IRQ].cpu = cpu;
out:
        spin_unlock_irqrestore(&irq_lock, flags);
        return cpu;
}
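
/*
 * irq_cpu() returns the CPU that should service the IRQ: CPU_FIXED
 * interrupts (timer, IPI) always stay on the local CPU; otherwise the
 * IRQ sticks to its current CPU as long as that CPU is still in the
 * affinity mask, and migrates to the first CPU in the mask if not.
 */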

void crisv32_mask_irq(int irq)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                block_irq(irq, cpu);
}

void crisv32_unmask_irq(int irq)
{
        unblock_irq(irq, irq_cpu(irq));
}

static void enable_crisv32_irq(struct irq_data *data)
{
        crisv32_unmask_irq(data->irq);
}

static void disable_crisv32_irq(struct irq_data *data)
{
        crisv32_mask_irq(data->irq);
}

static int set_affinity_crisv32_irq(struct irq_data *data,
                                    const struct cpumask *dest, bool force)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
        spin_unlock_irqrestore(&irq_lock, flags);
        return 0;
}

static struct irq_chip crisv32_irq_type = {
        .name = "CRISv32",
        .irq_shutdown = disable_crisv32_irq,
        .irq_enable = enable_crisv32_irq,
        .irq_disable = disable_crisv32_irq,
        .irq_set_affinity = set_affinity_crisv32_irq,
};
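
/*
 * The irq_chip above is what the generic IRQ core calls into when a
 * driver enables, disables or re-targets one of these interrupts.  For
 * illustration only (MY_INTR_VECT, my_handler and dev are hypothetical
 * names, not part of this file), a driver would typically attach a
 * handler with something like:
 *
 *      err = request_irq(MY_INTR_VECT, my_handler, 0, "my-dev", dev);
 *
 * after which the core routes enable/disable requests through
 * enable_crisv32_irq()/disable_crisv32_irq().
 */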

void
set_exception_vector(int n, irqvectptr addr)
{
        etrax_irv->v[n] = (irqvectptr) addr;
}

extern void do_IRQ(int irq, struct pt_regs * regs);

void
crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
{
        /* Interrupts that may not be moved to another CPU may
         * skip blocking. This currently applies only to the timer
         * IRQ and the IPI, and is used for the timer interrupt to
         * avoid watchdog starvation.
         */
        if (!block) {
                do_IRQ(irq, regs);
                return;
        }

        block_irq(irq, smp_processor_id());
        do_IRQ(irq, regs);

        unblock_irq(irq, irq_cpu(irq));
}
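
/*
 * Note the asymmetry above: the IRQ is blocked on the local CPU while it
 * is serviced, but re-enabled on whichever CPU irq_cpu() selects.  This
 * is how an affinity change made through set_affinity_crisv32_irq()
 * takes effect the next time the interrupt fires.
 */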

/* If multiple interrupts occur simultaneously we get a multiple
 * interrupt from the CPU and software has to sort out which
 * interrupts happened. There are two special cases here:
 *
 * 1. Timer interrupts may never be blocked because of the
 *    watchdog (refer to comment in include/asm/arch/irq.h)
 * 2. GDB serial port IRQs are unhandled here and will be handled
 *    as a single IRQ when it strikes again because the GDB
 *    stub wants to save the registers in its own fashion.
 */
void
crisv32_do_multiple(struct pt_regs* regs)
{
        int cpu;
        int mask;
        int masked[NBR_REGS];
        int bit;
        int i;

        cpu = smp_processor_id();

        /* An extra irq_enter here to prevent softirqs from running
         * after each do_IRQ. This will decrease the interrupt latency.
         */
        irq_enter();

        for (i = 0; i < NBR_REGS; i++) {
                /* Get which IRQs that happened. */
                masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
                        r_masked_vect, i);

                /* Calculate new IRQ mask with these IRQs disabled. */
                mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
                mask &= ~masked[i];

                /* Timer IRQ is never masked */
#ifdef TIMER_VECT1
                if ((i == 1) && (masked[0] & TIMER_MASK))
                        mask |= TIMER_MASK;
#else
                if ((i == 0) && (masked[0] & TIMER_MASK))
                        mask |= TIMER_MASK;
#endif
                /* Block all the IRQs */
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);

                /* Check for timer IRQ and handle it specially. */
#ifdef TIMER_VECT1
                if ((i == 1) && (masked[i] & TIMER_MASK)) {
                        masked[i] &= ~TIMER_MASK;
                        do_IRQ(TIMER0_INTR_VECT, regs);
                }
#else
                if ((i == 0) && (masked[i] & TIMER_MASK)) {
                        masked[i] &= ~TIMER_MASK;
                        do_IRQ(TIMER0_INTR_VECT, regs);
                }
#endif
        }

#ifdef IGNOREMASK
        /* Remove IRQs that can't be handled as multiple. */
        masked[0] &= ~IGNOREMASK;
#endif

        /* Handle the rest of the IRQs. */
        for (i = 0; i < NBR_REGS; i++) {
                for (bit = 0; bit < 32; bit++) {
                        if (masked[i] & (1 << bit))
                                do_IRQ(bit + FIRST_IRQ + i*32, regs);
                }
        }

        /* Unblock all the IRQs. */
        for (i = 0; i < NBR_REGS; i++) {
                mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
                mask |= masked[i];
                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
        }

        /* This irq_exit() will trigger the soft IRQs. */
        irq_exit();
}

static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
                           irq_hw_number_t hw_irq_num)
{
        irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);

        return 0;
}

static struct irq_domain_ops crisv32_irq_ops = {
        .map = crisv32_irq_map,
        .xlate = irq_domain_xlate_onecell,
};
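
/*
 * Each hardware vector is mapped straight to a Linux IRQ number by the
 * legacy domain registered in init_IRQ().  For illustration only (an
 * assumed binding sketch, not taken from this file), a matching
 * device-tree node could look like:
 *
 *      intc: interrupt-controller {
 *              compatible = "axis,crisv32-intc";
 *              #interrupt-cells = <1>;
 *      };
 *
 * With irq_domain_xlate_onecell a single cell carries the hardware
 * vector number directly.
 */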

/*
 * This is called by start_kernel. It fixes the IRQ masks and sets up the
 * interrupt vector table to point to bad-interrupt handlers (weird_irq).
 */
void __init
init_IRQ(void)
{
        int i;
        int j;
        reg_intr_vect_rw_mask vect_mask = {0};
        struct device_node *np;
        struct irq_domain *domain;

        /* Clear all interrupt masks. */
        for (i = 0; i < NBR_REGS; i++)
                REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);

        for (i = 0; i < 256; i++)
                etrax_irv->v[i] = weird_irq;

        np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
        domain = irq_domain_add_legacy(np, NBR_INTR_VECT - FIRST_IRQ,
                                       FIRST_IRQ, FIRST_IRQ,
                                       &crisv32_irq_ops, NULL);
        BUG_ON(!domain);
        irq_set_default_host(domain);
        of_node_put(np);

        for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT && j < MACH_IRQS; i++, j++)
                set_exception_vector(i, interrupt[j]);

        /* Mark timer and IPI IRQs as CPU-local. */
        irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
        irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
        irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
        irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);

        set_exception_vector(0x00, nmi_interrupt);
        set_exception_vector(0x30, multiple_interrupt);

        /* Set up handlers for the various MMU bus faults. */
        set_exception_vector(0x04, i_mmu_refill);
        set_exception_vector(0x05, i_mmu_invalid);
        set_exception_vector(0x06, i_mmu_access);
        set_exception_vector(0x07, i_mmu_execute);
        set_exception_vector(0x08, d_mmu_refill);
        set_exception_vector(0x09, d_mmu_invalid);
        set_exception_vector(0x0a, d_mmu_access);
        set_exception_vector(0x0b, d_mmu_write);

#ifdef CONFIG_BUG
        /* Break 14 handler, used to implement cheap BUG(). */
        set_exception_vector(0x1e, breakh_BUG);
#endif

        /* The system-call trap is reached by "break 13". */
        set_exception_vector(0x1d, system_call);

        /* Exception handlers for debugging, both user-mode and kernel-mode. */

        /* Break 8. */
        set_exception_vector(0x18, gdb_handle_exception);
        /* Hardware single step. */
        set_exception_vector(0x3, gdb_handle_exception);
        /* Hardware breakpoint. */
        set_exception_vector(0xc, gdb_handle_exception);

#ifdef CONFIG_ETRAX_KGDB
        kgdb_init();
        /* Everything is set up; now trap the kernel. */
        breakpoint();
#endif
}