/* irq-mips-gic.c - MIPS Global Interrupt Controller (GIC) irqchip driver */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

/* Non-zero once a GIC has been detected/probed. */
unsigned int gic_present;

/* Per-CPU bitmap of shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

/*
 * Allocation request passed down to the root GIC domain's .alloc:
 * either a device interrupt identified by hwirq, or an IPI allocation
 * for the CPUs in ipimask.
 */
struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};

static unsigned long __gic_base_addr;		/* physical base, for USM range */
static void __iomem *gic_base;			/* ioremapped register base */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);		/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;	/* root domain (local + shared) */
static struct irq_domain *gic_dev_domain;	/* device interrupts */
static struct irq_domain *gic_ipi_domain;	/* IPIs */
static int gic_shared_intrs;			/* number of shared interrupts */
static int gic_vpes;				/* number of VPEs seen by the GIC */
static unsigned int gic_cpu_pin;		/* CPU pin shared intrs route to */
static unsigned int timer_cpu_pin;		/* pin for the GIC timer (CMP hack) */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
/* Shared interrupts reserved for IPI use / currently free for IPI allocation. */
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);
/* 32-bit MMIO read of the GIC register at byte offset @reg. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}
/* 64-bit MMIO read of the GIC register at byte offset @reg. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}
  60. static inline unsigned long gic_read(unsigned int reg)
  61. {
  62. if (!mips_cm_is64)
  63. return gic_read32(reg);
  64. else
  65. return gic_read64(reg);
  66. }
  67. static inline void gic_write32(unsigned int reg, u32 val)
  68. {
  69. return __raw_writel(val, gic_base + reg);
  70. }
  71. static inline void gic_write64(unsigned int reg, u64 val)
  72. {
  73. return __raw_writeq(val, gic_base + reg);
  74. }
  75. static inline void gic_write(unsigned int reg, unsigned long val)
  76. {
  77. if (!mips_cm_is64)
  78. return gic_write32(reg, (u32)val);
  79. else
  80. return gic_write64(reg, (u64)val);
  81. }
  82. static inline void gic_update_bits(unsigned int reg, unsigned long mask,
  83. unsigned long val)
  84. {
  85. unsigned long regval;
  86. regval = gic_read(reg);
  87. regval &= ~mask;
  88. regval |= val;
  89. gic_write(reg, regval);
  90. }
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
/* Set the polarity (GIC_POL_POS/GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}
/* Set edge/level triggering (GIC_TRIG_EDGE/GIC_TRIG_LEVEL) of @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}
/* Enable/disable dual-edge (both edges) triggering for @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}
/* Route shared interrupt @intr to CPU interrupt pin @pin (and enable the map). */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
/* Route shared interrupt @intr to VPE @vpe (hardware VP number). */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
  130. #ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the 64-bit shared GIC counter. On a 32-bit CM the two halves
 * cannot be read atomically, so re-read the high word until it is
 * stable across the low-word read (classic hi/lo/hi loop).
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
  143. unsigned int gic_get_count_width(void)
  144. {
  145. unsigned int bits, config;
  146. config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  147. bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
  148. GIC_SH_CONFIG_COUNTBITS_SHF);
  149. return bits;
  150. }
/*
 * Program this VPE's local compare register with @cnt. On a 32-bit CM
 * the value is written as two 32-bit halves, high word first.
 */
void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}
/*
 * Program the compare register of another CPU's VPE via the OTHER_ADDR
 * redirect window. Interrupts are disabled so the OTHER_ADDR selection
 * cannot be clobbered by a local interrupt handler mid-sequence.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
/*
 * Read this VPE's 64-bit compare register. No hi/lo retry loop is used
 * here (unlike gic_read_count) - presumably because COMPARE does not
 * tick on its own, so the two halves cannot tear between reads.
 */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
  186. void gic_start_count(void)
  187. {
  188. u32 gicconfig;
  189. /* Start the counter */
  190. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  191. gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
  192. gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
  193. }
  194. void gic_stop_count(void)
  195. {
  196. u32 gicconfig;
  197. /* Stop the counter */
  198. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  199. gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
  200. gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
  201. }
  202. #endif
/* Return the hardware VP number of the calling CPU, as reported by the GIC. */
unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}
/*
 * Return true if local interrupt @intr can be routed through the GIC on
 * this VPE, per the routability bits in GIC_VPE_CTL. In EIC mode every
 * local interrupt is routable.
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		/* Other local interrupts have no routability bit. */
		return true;
	}
}
/*
 * EIC mode: bind interrupt vector @irq to register shadow set @set.
 * Installed as board_bind_eic_interrupt by gic_basic_init().
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
/*
 * .ipi_send_single callback: fire the shared interrupt backing this IPI
 * by setting its WEDGE bit. @cpu is implicit in the per-CPU hwirq that
 * the IPI domain allocated for the target.
 */
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
  243. int gic_get_c0_compare_int(void)
  244. {
  245. if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
  246. return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
  247. return irq_create_mapping(gic_irq_domain,
  248. GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
  249. }
/*
 * Return the Linux IRQ number for the performance counter interrupt,
 * or -1 when it is not usable (shared with the timer and not routable).
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
/*
 * Return the Linux IRQ number for the Fast Debug Channel interrupt,
 * or -1 when the FDC IRQ is not present.
 */
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
/*
 * Fill @gic_usm_res with the physical address range of the GIC's
 * user-mode visible section. Returns 0 on success, -1 if no GIC.
 */
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
/*
 * Scan the shared pending/mask registers and dispatch every shared
 * interrupt that is pending, unmasked and routed to this CPU.
 *
 * @chained: true when called from a chained handler (dispatch with
 *           generic_handle_irq()), false from the root vectored entry
 *           (dispatch with do_IRQ()).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		/*
		 * On a 64-bit kernel with 32-bit GIC registers each bitmap
		 * long must be assembled from two consecutive 32-bit reads.
		 */
		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Keep only interrupts that are unmasked AND routed to this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
/* irq_chip .irq_mask for shared interrupts. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
/* irq_chip .irq_unmask for shared interrupts. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
/* irq_chip .irq_ack for edge interrupts: clear the WEDGE latch. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
/*
 * irq_chip .irq_set_type: program polarity/trigger/dual-edge for a
 * shared interrupt and switch the descriptor between the edge and
 * level chips (with the matching flow handler) accordingly.
 * Unrecognised types fall through to active-high level.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
  375. #ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity for shared interrupts. The GIC can route a
 * shared interrupt to only one VPE, so the interrupt is re-routed to
 * the first online CPU in @cpumask and the per-CPU dispatch masks are
 * updated to match.
 *
 * NOTE(review): the full requested mask is copied into the irq affinity
 * mask even though only cpumask_first(&tmp) actually receives the
 * interrupt - verify this is the intended reporting behaviour.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
  398. #endif
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
/*
 * irq_chip for edge-triggered shared interrupts; acks via the WEDGE
 * register and doubles as the chip used for IPIs (.ipi_send_single).
 */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};
/*
 * Dispatch every pending, unmasked local interrupt on this VPE.
 * @chained selects generic_handle_irq() vs do_IRQ(), as in
 * gic_handle_shared_int().
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
/* Mask a local interrupt on the calling VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
/* Unmask a local interrupt on the calling VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
/* irq_chip for per-VPE local interrupts (percpu-devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name		=	"MIPS GIC Local",
	.irq_mask	=	gic_mask_local_irq,
	.irq_unmask	=	gic_unmask_local_irq,
};
/*
 * Mask a local interrupt on every VPE, using the OTHER_ADDR redirect
 * window under gic_lock (which also keeps the window selection stable).
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
/* Unmask a local interrupt on every VPE (mirror of the mask variant). */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
/*
 * irq_chip for local interrupts that the MIPS core code treats as
 * global (timer/perfctr/FDC): mask/unmask act on all VPEs at once.
 */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name		=	"MIPS GIC Local",
	.irq_mask	=	gic_mask_local_irq_all_vpes,
	.irq_unmask	=	gic_unmask_local_irq_all_vpes,
};
/* Root dispatch entry used as the EIC vectored-interrupt handler. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
/* Chained handler installed on the parent CPU interrupt line. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
/*
 * Program sane defaults at probe time: every shared interrupt is made
 * level-triggered, active-high and masked; every routable local
 * interrupt is masked on every VPE. Also installs the EIC shadow-set
 * binding callback.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
/*
 * Map a local interrupt: program the per-interrupt MAP register on
 * every VPE so the interrupt is delivered on gic_cpu_pin (the timer
 * uses timer_cpu_pin to accommodate the CONFIG_MIPS_CMP workaround).
 *
 * Returns 0 on success, -EPERM if the interrupt is not routable on
 * this hardware, -EINVAL for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
/*
 * Map a shared interrupt: route it to gic_cpu_pin on VPE @vpe and make
 * that VPE the only one whose per-CPU dispatch mask includes it.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
/*
 * Install the appropriate irq_chip and flow handler on @virq for a
 * device interrupt: the level chip for shared hwirqs, and for local
 * hwirqs either the all-VPEs chip (timer/perfctr/FDC, which the MIPS
 * core treats as ordinary per-cpu IRQs) or the percpu-devid chip.
 */
static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
			      unsigned int hwirq)
{
	struct irq_chip *chip;
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
	} else {
		switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
		case GIC_LOCAL_INT_TIMER:
		case GIC_LOCAL_INT_PERFCTR:
		case GIC_LOCAL_INT_FDC:
			/*
			 * HACK: These are all really percpu interrupts, but
			 * the rest of the MIPS kernel code does not use the
			 * percpu IRQ API for them.
			 */
			chip = &gic_all_vpes_local_irq_controller;
			irq_set_handler(virq, handle_percpu_irq);
			break;
		default:
			chip = &gic_local_irq_controller;
			irq_set_handler(virq, handle_percpu_devid_irq);
			irq_set_percpu_devid(virq);
			break;
		}

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    chip, NULL);
	}

	return err;
}
  612. static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
  613. unsigned int nr_irqs, void *arg)
  614. {
  615. struct gic_irq_spec *spec = arg;
  616. irq_hw_number_t hwirq, base_hwirq;
  617. int cpu, ret, i;
  618. if (spec->type == GIC_DEVICE) {
  619. /* verify that shared irqs don't conflict with an IPI irq */
  620. if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
  621. test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
  622. return -EBUSY;
  623. return gic_setup_dev_chip(d, virq, spec->hwirq);
  624. } else {
  625. base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
  626. if (base_hwirq == gic_shared_intrs) {
  627. return -ENOMEM;
  628. }
  629. /* check that we have enough space */
  630. for (i = base_hwirq; i < nr_irqs; i++) {
  631. if (!test_bit(i, ipi_available))
  632. return -EBUSY;
  633. }
  634. bitmap_clear(ipi_available, base_hwirq, nr_irqs);
  635. /* map the hwirq for each cpu consecutively */
  636. i = 0;
  637. for_each_cpu(cpu, spec->ipimask) {
  638. hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
  639. ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
  640. &gic_level_irq_controller,
  641. NULL);
  642. if (ret)
  643. goto error;
  644. irq_set_handler(virq + i, handle_level_irq);
  645. ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
  646. if (ret)
  647. goto error;
  648. i++;
  649. }
  650. /*
  651. * tell the parent about the base hwirq we allocated so it can
  652. * set its own domain data
  653. */
  654. spec->hwirq = base_hwirq;
  655. }
  656. return 0;
  657. error:
  658. bitmap_set(ipi_available, base_hwirq, nr_irqs);
  659. return ret;
  660. }
/*
 * Root-domain .free: return the range of shared interrupts that backed
 * these virqs to the IPI free pool. Device interrupts pass through here
 * too; setting already-set ipi_available bits for them is harmless
 * only if they never overlap the IPI range - which the GIC_DEVICE
 * alloc path enforces via ipi_resrv.
 */
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
/*
 * Root-domain .match: always refuse - this domain shouldn't be
 * accessed directly; consumers go through the dev or IPI domains.
 */
int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	return 0;
}
/* Ops for the root (local + shared) GIC domain. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};
  683. static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
  684. const u32 *intspec, unsigned int intsize,
  685. irq_hw_number_t *out_hwirq,
  686. unsigned int *out_type)
  687. {
  688. if (intsize != 3)
  689. return -EINVAL;
  690. if (intspec[0] == GIC_SHARED)
  691. *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
  692. else if (intspec[0] == GIC_LOCAL)
  693. *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
  694. else
  695. return -EINVAL;
  696. *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
  697. return 0;
  698. }
/*
 * Device-domain .alloc: forward a GIC_DEVICE allocation to the parent
 * (root) domain, then install the device chips on each virq. On any
 * per-virq failure the parent allocation is rolled back.
 */
static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
	};
	int i, ret;

	if (fwspec->param[0] == GIC_SHARED)
		spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
  724. void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
  725. unsigned int nr_irqs)
  726. {
  727. /* no real allocation is done for dev irqs, so no need to free anything */
  728. return;
  729. }
/*
 * Device-domain .activate: program the hardware routing for the mapped
 * hwirq - local interrupts via the local map, shared interrupts routed
 * to VPE 0 by default (affinity can move them later).
 *
 * NOTE(review): for shared hwirqs GIC_HWIRQ_TO_LOCAL(hwirq) is relied
 * on to fall outside [0, GIC_NUM_LOCAL_INTRS) - confirm against the
 * hwirq layout macros.
 */
static void gic_dev_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
		gic_local_irq_domain_map(domain, d->irq, d->hwirq);
	else
		gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}
/* Ops for the device-interrupt hierarchy domain. */
static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
	.activate = gic_dev_domain_activate,
};
/*
 * IPI-domain .xlate. There's nothing to translate here: the hwirq is
 * dynamically allocated at alloc time and the IRQ type is always edge
 * triggered (rising).
 */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
/*
 * IPI-domain .alloc: ask the parent (root) domain to reserve a block
 * of shared interrupts for the CPUs in @arg, then attach the edge chip
 * (which carries .ipi_send_single) to each resulting virq.
 */
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
/* IPI-domain .free: release the parent (root) domain allocation. */
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}
  791. int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
  792. enum irq_domain_bus_token bus_token)
  793. {
  794. bool is_ipi;
  795. switch (bus_token) {
  796. case DOMAIN_BUS_IPI:
  797. is_ipi = d->bus_token == bus_token;
  798. return (!node || to_of_node(d->fwnode) == node) && is_ipi;
  799. break;
  800. default:
  801. return 0;
  802. }
  803. }
/* Ops for the IPI hierarchy domain. */
static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
/*
 * Common GIC initialisation, shared by the legacy gic_init() entry point
 * and the device-tree gic_of_init() probe.
 *
 * Maps the GIC register region, reads the shared-interrupt and VPE counts
 * from GIC_SH_CONFIG, routes GIC output to the CPU (either an EIC vector
 * or a chained CPU interrupt pin), registers the three IRQ domains
 * (raw GIC, device, IPI) and reserves a range of shared interrupts for
 * IPI use. Panics on domain-allocation failure since the system cannot
 * take interrupts without them.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* Discover hardware topology from the GIC config register */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	/* NUMINTRS field encodes (number of shared interrupts / 8) - 1 */
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	/* NUMVPES field encodes (number of VPEs) - 1 */
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode (pin 0 + vector offset) */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		/* Non-EIC: chain the GIC onto the chosen CPU interrupt */
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	/* Root domain covering every local and shared GIC interrupt */
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	/* Hierarchical child domain used by device interrupt requests */
	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_dev_domain_ops, NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");
	gic_dev_domain->name = "mips-gic-dev";

	/* Hierarchical child domain for inter-processor interrupts */
	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");
	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	/*
	 * Reserve shared interrupts for IPIs: either the DT-specified
	 * range, or by default the last 2 * gic_vpes shared interrupts.
	 */
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	gic_basic_init();
}
/*
 * Legacy (non-device-tree) entry point: initialise the GIC from
 * caller-supplied base address, region size, CPU vector and IRQ base,
 * passing a NULL node to the common __gic_init() path.
 */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
/*
 * Device-tree probe for the MIPS GIC ("mti,gic").
 *
 * Selects the first CPU interrupt vector in [2, 7] not listed in the
 * "mti,reserved-cpu-vectors" property, obtains the GIC register range
 * from the DT "reg" property (falling back to the Coherence Manager's
 * GCR_GIC_BASE when absent), enables the GIC region via the CM, then
 * hands off to the common __gic_init().
 *
 * Returns 0 on success, or -ENODEV when no CPU vector is free or the
 * register range cannot be determined.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			/* architecturally-defined size of the GIC region */
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);