gic.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270
/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/wakeup_reason.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
#include <asm/system.h>

#include <mach/socinfo.h>
#include <mach/msm_rtb.h>
/*
 * How a GIC register bank is addressed: either one MMIO base shared by
 * all CPUs (normal, banked GIC) or a per-cpu table of bases
 * (CONFIG_GIC_NON_BANKED "Frankein-GIC", see gic_init_bases()).
 */
union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};
/*
 * Per-controller driver state; one instance per GIC (see gic_data[]).
 */
struct gic_chip_data {
	unsigned int irq_offset;	/* added to hwirq when logging wakeup reasons */
	union gic_base dist_base;	/* distributor registers */
	union gic_base cpu_base;	/* CPU interface registers */
	bool need_access_lock;		/* serialize CPU-interface MMIO (early MSM8625) */
#ifdef CONFIG_CPU_PM
	/* SPI state saved/restored around cluster power-down (gic_dist_save/restore) */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 saved_dist_pri[DIV_ROUND_UP(1020, 4)];
	/* banked PPI/SGI state, saved per-cpu (gic_cpu_save/restore) */
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	u32 saved_dist_isr[DIV_ROUND_UP(1020, 32)];	/* secure/non-secure grouping */
	struct irq_domain *domain;	/* hwirq <-> Linux irq mapping */
	unsigned int gic_irqs;		/* interrupt lines supported by the hardware */
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
	unsigned int max_irq;
#ifdef CONFIG_PM
	/* per-word enable masks applied across suspend (gic_suspend_one/resume_one) */
	unsigned int wakeup_irqs[32];
	unsigned int enabled_irqs[32];
#endif
};
/* Protects distributor registers and (where needed) CPU-interface MMIO. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

#ifdef CONFIG_CPU_PM
/* Control register values captured at PM entry, restored on exit. */
static unsigned int saved_dist_ctrl, saved_cpu_ctrl;
#endif

/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
	.irq_disable	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
/* Resolve this CPU's register base from the per-cpu pointer table. */
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

/* Resolve the single shared register base. */
static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

/* Select which accessor (banked vs. per-cpu) this GIC uses. */
static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
/* Banked-only build: bases are plain pointers, accessor hook compiles away. */
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d,f)
#endif
  128. static inline void __iomem *gic_dist_base(struct irq_data *d)
  129. {
  130. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  131. return gic_data_dist_base(gic_data);
  132. }
  133. static inline void __iomem *gic_cpu_base(struct irq_data *d)
  134. {
  135. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  136. return gic_data_cpu_base(gic_data);
  137. }
  138. static inline unsigned int gic_irq(struct irq_data *d)
  139. {
  140. return d->hwirq;
  141. }
  142. #if defined(CONFIG_CPU_V7) && defined(CONFIG_GIC_SECURE)
  143. static const inline bool is_cpu_secure(void)
  144. {
  145. unsigned int dscr;
  146. asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr));
  147. /* BIT(18) - NS bit; 1 = NS; 0 = S */
  148. if (BIT(18) & dscr)
  149. return false;
  150. else
  151. return true;
  152. }
  153. #else
  154. static const inline bool is_cpu_secure(void)
  155. {
  156. return false;
  157. }
  158. #endif
  159. /*
  160. * Routines to acknowledge, disable and enable interrupts
  161. */
/*
 * Mask the interrupt behind @d: set its bit in the distributor
 * enable-clear register so it is no longer forwarded, and give the arch
 * extension a chance to mask its side under the same lock.
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
/*
 * Unmask the interrupt behind @d. The arch extension is unmasked first,
 * then the bit is set in the distributor enable-set register — the
 * mirror image of gic_mask_irq()'s ordering.
 */
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
  180. static void gic_disable_irq(struct irq_data *d)
  181. {
  182. if (gic_arch_extn.irq_disable)
  183. gic_arch_extn.irq_disable(d);
  184. }
  185. #ifdef CONFIG_PM
/*
 * Prepare one GIC for system suspend: remember the currently-enabled
 * set, disable everything, then enable only the interrupts marked as
 * wakeup sources via gic_set_wake(). Undone by gic_resume_one().
 */
static int gic_suspend_one(struct gic_chip_data *gic)
{
	unsigned int i;
	void __iomem *base = gic_data_dist_base(gic);

	for (i = 0; i * 32 < gic->max_irq; i++) {
		gic->enabled_irqs[i]
			= readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
		/* disable all of them */
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4);
		/* enable the wakeup set */
		writel_relaxed(gic->wakeup_irqs[i],
			base + GIC_DIST_ENABLE_SET + i * 4);
	}
	/* ensure the writes hit the hardware before power-down proceeds */
	mb();
	return 0;
}
  202. static int gic_suspend(void)
  203. {
  204. int i;
  205. for (i = 0; i < MAX_GIC_NR; i++)
  206. gic_suspend_one(&gic_data[i]);
  207. return 0;
  208. }
extern int msm_show_resume_irq_mask;

/*
 * After resume, record which enabled distributor interrupts are pending
 * — i.e. the probable wakeup sources. No-op unless the
 * msm_show_resume_irq_mask module knob is set.
 *
 * NOTE(review): pending[] words past the last loop index stay
 * uninitialized; that is safe only because the find_*_bit scans are
 * bounded by max_irq, which covers exactly the words written.
 */
static void gic_show_resume_irq(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 enabled;
	unsigned long pending[32];
	void __iomem *base = gic_data_dist_base(gic);

	if (!msm_show_resume_irq_mask)
		return;

	raw_spin_lock(&irq_controller_lock);
	for (i = 0; i * 32 < gic->max_irq; i++) {
		enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
		pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
		pending[i] &= enabled;	/* only care about enabled sources */
	}
	raw_spin_unlock(&irq_controller_lock);

	for (i = find_first_bit(pending, gic->max_irq);
	     i < gic->max_irq;
	     i = find_next_bit(pending, gic->max_irq, i+1)) {
#ifdef CONFIG_SEC_PM_DEBUG
		log_wakeup_reason(i + gic->irq_offset);
		update_wakeup_reason_stats(i + gic->irq_offset);
#endif
	}
}
/*
 * Undo gic_suspend_one(): log the wakeup source(s), then restore the
 * pre-suspend enable state by clearing everything and re-enabling the
 * saved set.
 */
static void gic_resume_one(struct gic_chip_data *gic)
{
	unsigned int i;
	void __iomem *base = gic_data_dist_base(gic);

	gic_show_resume_irq(gic);
	for (i = 0; i * 32 < gic->max_irq; i++) {
		/* disable all of them */
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4);
		/* enable the enabled set */
		writel_relaxed(gic->enabled_irqs[i],
			base + GIC_DIST_ENABLE_SET + i * 4);
	}
	/* make sure the restore completes before interrupts are taken */
	mb();
}
  248. static void gic_resume(void)
  249. {
  250. int i;
  251. for (i = 0; i < MAX_GIC_NR; i++)
  252. gic_resume_one(&gic_data[i]);
  253. }
/* Hook the suspend/resume sequencing into the syscore framework. */
static struct syscore_ops gic_syscore_ops = {
	.suspend = gic_suspend,
	.resume = gic_resume,
};

/* Register the syscore ops once at boot. */
static int __init gic_init_sys(void)
{
	register_syscore_ops(&gic_syscore_ops);
	return 0;
}
arch_initcall(gic_init_sys);
  264. #endif
/*
 * Signal end-of-interrupt for @d: let the arch extension EOI first
 * (under the controller lock), then write the hwirq number to the CPU
 * interface EOI register. need_access_lock additionally serializes the
 * CPU-interface write on affected MSM8625 parts.
 */
static void gic_eoi_irq(struct irq_data *d)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	if (gic->need_access_lock)
		raw_spin_lock(&irq_controller_lock);
	writel_relaxed_no_log(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
	if (gic->need_access_lock)
		raw_spin_unlock(&irq_controller_lock);
}
/*
 * Configure the trigger type of the interrupt behind @d.
 *
 * Only level-high and edge-rising are supported by the GIC, and SGIs
 * (hwirq < 16) are fixed-configuration. Returns 0 on success, -EINVAL
 * for an unsupported type or an SGI.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);	/* edge/level bit; 2 bits per irq */
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* re-enable only if we disabled it above */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
  316. static int gic_retrigger(struct irq_data *d)
  317. {
  318. if (gic_arch_extn.irq_retrigger)
  319. return gic_arch_extn.irq_retrigger(d);
  320. /* the genirq layer expects 0 for a failure */
  321. return 0;
  322. }
  323. #ifdef CONFIG_SMP
/*
 * Route the interrupt behind @d to one online CPU from @mask_val by
 * rewriting its byte in the distributor targets register (one byte per
 * irq, four irqs per word). The GIC supports at most 8 target CPUs.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	/* target field holds the physical CPU number */
	bit = 1 << (cpu_logical_map(cpu) + shift);

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed_no_log(reg) & ~mask;
	writel_relaxed_no_log(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
  341. #endif
  342. #ifdef CONFIG_PM
/*
 * Mark or unmark the hwirq behind @d in the per-GIC wakeup_irqs[]
 * bitmap consumed by gic_suspend_one(), then forward the request to the
 * arch extension. Returns -ENXIO when no extension handles set_wake.
 */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;
	unsigned int reg_offset, bit_offset;
	unsigned int gicirq = gic_irq(d);
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	/* per-cpu interrupts cannot be wakeup interrupts */
	WARN_ON(gicirq < 32);

	reg_offset = gicirq / 32;
	bit_offset = gicirq % 32;

	if (on)
		gic_data->wakeup_irqs[reg_offset] |= 1 << bit_offset;
	else
		gic_data->wakeup_irqs[reg_offset] &= ~(1 << bit_offset);

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
  361. #else
  362. #define gic_set_wake NULL
  363. #endif
/*
 * Primary interrupt entry from the low-level vector: acknowledge and
 * dispatch interrupts from GIC 0's CPU interface until it reports
 * nothing pending (spurious ID). need_access_lock serializes the
 * CPU-interface accesses on affected MSM8625 parts.
 */
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		if (gic->need_access_lock)
			raw_spin_lock(&irq_controller_lock);
		irqstat = readl_relaxed_no_log(cpu_base + GIC_CPU_INTACK);
		if (gic->need_access_lock)
			raw_spin_unlock(&irq_controller_lock);
		/* strip the source-CPU field; the low bits are the irq ID */
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			/* PPI/SPI: translate hwirq to Linux irq and handle it */
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
			continue;
		}
		if (irqnr < 16) {
			/* SGI (IPI): EOI must echo the full ack word incl. source CPU */
			if (gic->need_access_lock)
				raw_spin_lock(&irq_controller_lock);
			writel_relaxed_no_log(irqstat, cpu_base + GIC_CPU_EOI);
			if (gic->need_access_lock)
				raw_spin_unlock(&irq_controller_lock);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
#endif
			continue;
		}
		/* 1021-1023: spurious — nothing left to service */
		break;
	} while (1);
}
/*
 * Flow handler for a secondary GIC cascaded behind a parent irq: ack
 * the child GIC, map the resulting hwirq through its domain and invoke
 * the mapped handler. ID 1023 means spurious — nothing to do.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);	/* irq ID is the low 10 bits */
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	/* only SPIs (32..1020) are valid through a cascade */
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
/* irq_chip exported to genirq for every interrupt owned by a GIC. */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_disable		= gic_disable_irq,
	.irq_set_wake		= gic_set_wake,
};
  431. void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
  432. {
  433. if (gic_nr >= MAX_GIC_NR)
  434. BUG();
  435. if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
  436. BUG();
  437. irq_set_chained_handler(irq, gic_handle_cascade_irq);
  438. }
/*
 * One-time distributor initialisation, run on the boot CPU: program
 * trigger type, target CPU, security grouping and priority for every
 * SPI, disable them all, then (re)enable the distributor. Banked
 * PPI/SGI registers are handled per-cpu in gic_cpu_init().
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);
	u32 cpu = cpu_logical_map(smp_processor_id());

	/* replicate this CPU's target byte into all four byte lanes */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* disable the distributor while reprogramming it */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set NS/S.
	 */
	if (is_cpu_secure())
		for (i = 32; i < gic_irqs; i += 32)
			writel_relaxed(0xFFFFFFFF,
					base + GIC_DIST_ISR + i * 4 / 32);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts. Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	gic->max_irq = gic_irqs;

	/* secure world enables both interrupt groups (bits 0 and 1) */
	if (is_cpu_secure())
		writel_relaxed(3, base + GIC_DIST_CTRL);
	else
		writel_relaxed(1, base + GIC_DIST_CTRL);

	/* ensure the configuration reaches the hardware */
	mb();
}
/*
 * Per-CPU initialisation, run on every CPU (boot and hotplug): set up
 * the banked PPI/SGI registers and enable this CPU's interface.
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	if (gic->need_access_lock)
		raw_spin_lock(&irq_controller_lock);
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/* Set NS/S */
	if (is_cpu_secure())
		writel_relaxed(0xFFFFFFFF, dist_base + GIC_DIST_ISR);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* accept anything with priority above 0xf0 */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);

	/* secure world enables both groups plus AckCtl/FIQEn */
	if (is_cpu_secure())
		writel_relaxed(0xF, base + GIC_CPU_CTRL);
	else
		writel_relaxed(1, base + GIC_CPU_CTRL);
	if (gic->need_access_lock)
		raw_spin_unlock(&irq_controller_lock);
	mb();
}
  515. #ifdef CONFIG_CPU_PM
  516. /*
  517. * Saves the GIC distributor registers during suspend or idle. Must be called
  518. * with interrupts disabled but before powering down the GIC. After calling
  519. * this function, no interrupts will be delivered by the GIC, and another
  520. * platform-specific wakeup source must be enabled.
  521. */
/*
 * Save all distributor SPI state (control, config, targets, priority,
 * enables, and security grouping when secure) into gic_data[gic_nr] so
 * gic_dist_restore() can reprogram the GIC after a cluster power-down.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	saved_dist_ctrl = readl_relaxed(dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_dist_pri[i] =
			readl_relaxed(dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	if (is_cpu_secure()) {
		for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
			gic_data[gic_nr].saved_dist_isr[i] =
				readl_relaxed(dist_base + GIC_DIST_ISR + i * 4);
	}
}
  552. /*
  553. * Restores the GIC distributor registers during resume or when coming out of
  554. * idle. Must be called before enabling interrupts. If a level interrupt
  555. * that occured while the GIC was suspended is still present, it will be
  556. * handled normally, but any edge interrupts that occured will not be seen by
  557. * the GIC and need to be handled by the platform-specific wakeup source.
  558. */
/*
 * Reverse of gic_dist_save(): with the distributor disabled, write back
 * config, priority, targets, enables (and ISR grouping when secure),
 * then restore the saved control register to re-enable it.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	/* keep the distributor off while its state is rewritten */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_dist_pri[i],
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	if (is_cpu_secure()) {
		for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
			writel_relaxed(gic_data[gic_nr].saved_dist_isr[i],
				dist_base + GIC_DIST_ISR + i * 4);
	}

	writel_relaxed(saved_dist_ctrl, dist_base + GIC_DIST_CTRL);
}
/*
 * Save this CPU's banked GIC state (CPU-interface control, PPI/SGI
 * priorities, enables and config) before the CPU is powered down.
 * NOTE(review): the first 8 words of saved_dist_pri are shared with
 * gic_dist_save(); the two are used on different PM paths — confirm
 * they never overlap on this platform.
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	saved_cpu_ctrl = readl_relaxed_no_log(cpu_base + GIC_CPU_CTRL);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed_no_log(
							dist_base +
							GIC_DIST_PRI + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed_no_log(dist_base +
					      GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed_no_log(dist_base +
					      GIC_DIST_CONFIG + i * 4);
}
/*
 * Reverse of gic_cpu_save(): rewrite this CPU's banked PPI/SGI enables,
 * config and priorities, then the priority mask and the saved
 * CPU-interface control register.
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed_no_log(ptr[i], dist_base +
				GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed_no_log(ptr[i], dist_base +
				GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed_no_log(gic_data[gic_nr].saved_dist_pri[i],
				dist_base + GIC_DIST_PRI + i * 4);

	/* accept anything with priority above 0xf0, as in gic_cpu_init() */
	writel_relaxed_no_log(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed_no_log(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
}
/*
 * CPU PM notifier: save/restore per-CPU banked state on CPU_PM events
 * and full distributor state on CPU_CLUSTER_PM events, for every
 * in-use GIC.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

/*
 * Allocate the per-cpu buffers for banked PPI/SGI state and register
 * the CPU PM notifier (once, for the primary GIC only).
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* !CONFIG_CPU_PM: no state needs saving, so all PM hooks are no-ops. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}

static void gic_cpu_restore(unsigned int gic_nr)
{
}

static void gic_cpu_save(unsigned int gic_nr)
{
}

static void gic_dist_restore(unsigned int gic_nr)
{
}

static void gic_dist_save(unsigned int gic_nr)
{
}
#endif
/*
 * irq_domain map callback: bind Linux irq @irq to hwirq @hw. Per-cpu
 * interrupts (hwirq < 32, SGI/PPI) get the percpu-devid flow and stay
 * off until explicitly enabled; SPIs get the fasteoi flow and are
 * probeable.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
/*
 * Translate a 3-cell devicetree interrupt specifier into (hwirq, type):
 * cell 0 selects SPI (0) vs PPI (1), cell 1 is the controller-relative
 * number, cell 2 the trigger flags.
 */
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
/* irq_domain callbacks shared by every GIC instance. */
const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
/*
 * Probe and initialise GIC @gic_nr.
 *
 * @irq_start:	first Linux irq to use, or -1 to let the core pick.
 * @dist_base/@cpu_base: MMIO bases of distributor and CPU interface.
 * @percpu_offset: per-cpu register stride for non-banked GICs (0 for a
 *		normal banked GIC).
 * @node:	devicetree node, if probed from DT.
 *
 * Sets up the register-base accessors, a legacy irq_domain mapping
 * hwirq_base..gic_irqs onto irq_base.., then programs the distributor,
 * this CPU's interface, and the PM hooks.
 */
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	/* early MSM8625 needs all CPU-interface MMIO serialized */
	if (cpu_is_msm8625() &&
			(SOCINFO_VERSION_MAJOR(socinfo_get_version()) <= 1))
		gic->need_access_lock = true;

#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankein-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		/* each CPU's registers sit @percpu_offset apart per physical id */
		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		irq_base = irq_start;
	}
	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
				    hwirq_base, &gic_irq_domain_ops, gic);
	if (WARN_ON(!gic->domain))
		return;

	/* inherit flow flags from the arch extension, if any */
	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
  813. void __cpuinit gic_secondary_init(unsigned int gic_nr)
  814. {
  815. BUG_ON(gic_nr >= MAX_GIC_NR);
  816. gic_cpu_init(&gic_data[gic_nr]);
  817. }
  818. #ifdef CONFIG_SMP
/*
 * gic_raise_softirq - send a software-generated interrupt (IPI) @irq to
 * every CPU in @mask. Always issued through the primary GIC (gic_data[0]).
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long sgir;
	unsigned long map = 0;
	unsigned long flags = 0;
	struct gic_chip_data *gic = &gic_data[0];

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/* GIC_DIST_SOFTINT: CPU target list in bits [23:16], SGI id in [3:0]. */
	sgir = (map << 16) | irq;
	if (is_cpu_secure())
		/* NOTE(review): bit 15 looks like the SATT/group attribute
		 * used when the CPU runs secure — confirm against the GIC
		 * GICD_SGIR description for this SoC. */
		sgir |= (1 << 15);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	if (gic->need_access_lock)
		raw_spin_lock_irqsave(&irq_controller_lock, flags);
	/* this always happens on GIC0 */
	writel_relaxed_no_log(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
	if (gic->need_access_lock)
		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
	mb();
}
  845. #endif
  846. void gic_set_irq_secure(unsigned int irq)
  847. {
  848. unsigned int gicd_isr_reg, gicd_pri_reg;
  849. unsigned int mask = 0xFFFFFF00;
  850. struct gic_chip_data *gic_data = &gic_data[0];
  851. struct irq_data *d = irq_get_irq_data(irq);
  852. if (is_cpu_secure()) {
  853. raw_spin_lock(&irq_controller_lock);
  854. gicd_isr_reg = readl_relaxed(gic_dist_base(d) +
  855. GIC_DIST_ISR + gic_irq(d) / 32 * 4);
  856. gicd_isr_reg &= ~BIT(gic_irq(d) % 32);
  857. writel_relaxed(gicd_isr_reg, gic_dist_base(d) +
  858. GIC_DIST_ISR + gic_irq(d) / 32 * 4);
  859. /* Also increase the priority of that irq */
  860. gicd_pri_reg = readl_relaxed(gic_dist_base(d) +
  861. GIC_DIST_PRI + (gic_irq(d) * 4 / 4));
  862. gicd_pri_reg &= mask;
  863. gicd_pri_reg |= 0x80; /* Priority of 0x80 > 0xA0 */
  864. writel_relaxed(gicd_pri_reg, gic_dist_base(d) + GIC_DIST_PRI +
  865. gic_irq(d) * 4 / 4);
  866. mb();
  867. raw_spin_unlock(&irq_controller_lock);
  868. } else {
  869. WARN(1, "Trying to run secure operation from Non-secure mode");
  870. }
  871. }
  872. #ifdef CONFIG_OF
static int gic_cnt __initdata = 0;	/* number of GICs probed from DT so far */

/*
 * gic_of_init - devicetree probe entry for a GIC node.
 *
 * Maps the distributor (reg index 0) and CPU interface (reg index 1),
 * reads the optional "cpu-offset" property (stride for non-banked
 * per-CPU register copies) and initialises the controller. A node with
 * a @parent is a secondary GIC cascaded through one of the parent's
 * interrupt lines.
 */
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

	if (parent) {
		/* NOTE(review): irq_of_parse_and_map() returning 0 (failure)
		 * is not checked before cascading — verify callers. */
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
  896. #endif
  897. /*
  898. * Before calling this function the interrupts should be disabled
  899. * and the irq must be disabled at gic to avoid spurious interrupts
  900. */
  901. bool gic_is_irq_pending(unsigned int irq)
  902. {
  903. struct irq_data *d = irq_get_irq_data(irq);
  904. struct gic_chip_data *gic_data = &gic_data[0];
  905. u32 mask, val;
  906. WARN_ON(!irqs_disabled());
  907. raw_spin_lock(&irq_controller_lock);
  908. mask = 1 << (gic_irq(d) % 32);
  909. val = readl(gic_dist_base(d) +
  910. GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
  911. /* warn if the interrupt is enabled */
  912. WARN_ON(val & mask);
  913. val = readl(gic_dist_base(d) +
  914. GIC_DIST_PENDING_SET + (gic_irq(d) / 32) * 4);
  915. raw_spin_unlock(&irq_controller_lock);
  916. return (bool) (val & mask);
  917. }
  918. /*
  919. * Before calling this function the interrupts should be disabled
  920. * and the irq must be disabled at gic to avoid spurious interrupts
  921. */
  922. void gic_clear_irq_pending(unsigned int irq)
  923. {
  924. struct gic_chip_data *gic_data = &gic_data[0];
  925. struct irq_data *d = irq_get_irq_data(irq);
  926. u32 mask, val;
  927. WARN_ON(!irqs_disabled());
  928. raw_spin_lock(&irq_controller_lock);
  929. mask = 1 << (gic_irq(d) % 32);
  930. val = readl(gic_dist_base(d) +
  931. GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
  932. /* warn if the interrupt is enabled */
  933. WARN_ON(val & mask);
  934. writel(mask, gic_dist_base(d) +
  935. GIC_DIST_PENDING_CLEAR + (gic_irq(d) / 32) * 4);
  936. raw_spin_unlock(&irq_controller_lock);
  937. }
  938. #ifdef CONFIG_ARCH_MSM8625
  939. /*
  940. * Check for any interrupts which are enabled are pending
  941. * in the pending set or not.
  942. * Return :
  943. * 0 : No pending interrupts
  944. * 1 : Pending interrupts other than A9_M2A_5
  945. */
  946. unsigned int msm_gic_spi_ppi_pending(void)
  947. {
  948. unsigned int i, bit = 0;
  949. unsigned int pending_enb = 0, pending = 0;
  950. unsigned long value = 0;
  951. struct gic_chip_data *gic = &gic_data[0];
  952. void __iomem *base = gic_data_dist_base(gic);
  953. unsigned long flags;
  954. raw_spin_lock_irqsave(&irq_controller_lock, flags);
  955. /*
  956. * PPI and SGI to be included.
  957. * MSM8625_INT_A9_M2A_5 needs to be ignored, as A9_M2A_5
  958. * requesting sleep triggers it
  959. */
  960. for (i = 0; (i * 32) < gic->max_irq; i++) {
  961. pending = readl_relaxed(base +
  962. GIC_DIST_PENDING_SET + i * 4);
  963. pending_enb = readl_relaxed(base +
  964. GIC_DIST_ENABLE_SET + i * 4);
  965. value = pending & pending_enb;
  966. if (value) {
  967. for (bit = 0; bit < 32; bit++) {
  968. bit = find_next_bit(&value, 32, bit);
  969. if ((bit + 32 * i) != MSM8625_INT_A9_M2A_5) {
  970. raw_spin_unlock_irqrestore(
  971. &irq_controller_lock, flags);
  972. return 1;
  973. }
  974. }
  975. }
  976. }
  977. raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
  978. return 0;
  979. }
  980. #endif
  981. void msm_gic_save(void)
  982. {
  983. unsigned int i;
  984. struct gic_chip_data *gic = &gic_data[0];
  985. void __iomem *base = gic_data_dist_base(gic);
  986. gic_cpu_save(0);
  987. gic_dist_save(0);
  988. /* Disable all the Interrupts, before we enter pc */
  989. for (i = 0; (i * 32) < gic->max_irq; i++) {
  990. raw_spin_lock(&irq_controller_lock);
  991. writel_relaxed(0xffffffff, base
  992. + GIC_DIST_ENABLE_CLEAR + i * 4);
  993. raw_spin_unlock(&irq_controller_lock);
  994. }
  995. }
/*
 * msm_gic_restore - restore the distributor and CPU-interface state saved
 * by msm_gic_save() when coming back from power collapse.
 */
void msm_gic_restore(void)
{
	gic_dist_restore(0);
	gic_cpu_restore(0);
}
  1001. /*
  1002. * Configure the GIC after we come out of power collapse.
  1003. * This function will configure some of the GIC registers so as to prepare the
  1004. * secondary cores to receive an SPI(ACSR_MP_CORE_IPC1/IPC2/IPC3, 40/92/93),
  1005. * which will bring cores out of GDFS.
  1006. */
/*
 * gic_configure_and_raise - see the block comment above: prepare the
 * distributor so @cpu receives the wake-up SPI @irq after power collapse.
 * All three register updates are done under irq_controller_lock with an
 * mb() after each write so they hit the hardware in order.
 */
void gic_configure_and_raise(unsigned int irq, unsigned int cpu)
{
	struct gic_chip_data *gic = &gic_data[0];
	struct irq_data *d = irq_get_irq_data(irq);
	void __iomem *base = gic_data_dist_base(gic);
	unsigned int value = 0, byte_offset, offset, bit;
	unsigned long flags;

	/* Word-aligned register offset and bit position for this irq. */
	offset = ((gic_irq(d) / 32) * 4);
	bit = BIT(gic_irq(d) % 32);

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/*
	 * Set the interrupt's active bit directly. NOTE(review): writing
	 * GIC_DIST_ACTIVE_BIT by hand is unusual; presumably part of the
	 * GDFS wake-up sequence — confirm against the MSM8625 power
	 * collapse documentation.
	 */
	value = __raw_readl(base + GIC_DIST_ACTIVE_BIT + offset);
	__raw_writel(value | bit, base + GIC_DIST_ACTIVE_BIT + offset);
	mb();

	/* Route the SPI to @cpu: GIC_DIST_TARGET has one byte per irq. */
	value = __raw_readl(base + GIC_DIST_TARGET + (gic_irq(d) / 4) * 4);
	byte_offset = (gic_irq(d) % 4) * 8;
	value |= 1 << (cpu + byte_offset);
	__raw_writel(value, base + GIC_DIST_TARGET + (gic_irq(d) / 4) * 4);
	mb();

	/* Finally enable the interrupt at the distributor. */
	value = __raw_readl(base + GIC_DIST_ENABLE_SET + offset);
	__raw_writel(value | bit, base + GIC_DIST_ENABLE_SET + offset);
	mb();

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
  1030. #if defined (CONFIG_MACH_AFYONLTE_TMO) || defined(CONFIG_MACH_ATLANTICLTE_ATT) || defined(CONFIG_MACH_ATLANTIC3GEUR_OPEN)
  1031. void gic_dump_register_set(void)
  1032. {
  1033. struct gic_chip_data *gic_global = &gic_data[0];
  1034. void __iomem *qgic_base;
  1035. pr_err("%s Dump QGIC registers\n", __func__);
  1036. qgic_base = gic_data_dist_base(gic_global) + GIC_DIST_ENABLE_SET;
  1037. pr_err("GIC_DIST_ENABLE_SET 0x%x 0x%x 0x%x 0x%x\n\t 0x%x 0x%x 0x%x 0x%x",
  1038. __raw_readl(qgic_base + 0x0), __raw_readl(qgic_base + 0x4),
  1039. __raw_readl(qgic_base + 0x8), __raw_readl(qgic_base + 0xC),
  1040. __raw_readl(qgic_base + 0x10), __raw_readl(qgic_base + 0x14),
  1041. __raw_readl(qgic_base + 0x18), __raw_readl(qgic_base + 0x1C));
  1042. qgic_base = gic_data_dist_base(gic_global) + GIC_DIST_ENABLE_CLEAR;
  1043. pr_err("GIC_DIST_ENABLE_CLEAR 0x%x 0x%x 0x%x 0x%x\n\t 0x%x 0x%x 0x%x 0x%x",
  1044. __raw_readl(qgic_base + 0x0), __raw_readl(qgic_base + 0x4),
  1045. __raw_readl(qgic_base + 0x8), __raw_readl(qgic_base + 0xC),
  1046. __raw_readl(qgic_base + 0x10), __raw_readl(qgic_base + 0x14),
  1047. __raw_readl(qgic_base + 0x18), __raw_readl(qgic_base + 0x1C));
  1048. qgic_base = gic_data_dist_base(gic_global) + GIC_DIST_PENDING_SET;
  1049. pr_err("GIC_DIST_PENDING_SET 0x%x 0x%x 0x%x 0x%x\n\t 0x%x 0x%x 0x%x 0x%x",
  1050. __raw_readl(qgic_base + 0x0), __raw_readl(qgic_base + 0x4),
  1051. __raw_readl(qgic_base + 0x8), __raw_readl(qgic_base + 0xC),
  1052. __raw_readl(qgic_base + 0x10), __raw_readl(qgic_base + 0x14),
  1053. __raw_readl(qgic_base + 0x18), __raw_readl(qgic_base + 0x1C));
  1054. qgic_base = gic_data_dist_base(gic_global) + GIC_DIST_PENDING_CLEAR;
  1055. pr_err("GIC_DIST_PENDING_CLEAR 0x%x 0x%x 0x%x 0x%x\n\t 0x%x 0x%x 0x%x 0x%x",
  1056. __raw_readl(qgic_base + 0x0), __raw_readl(qgic_base + 0x4),
  1057. __raw_readl(qgic_base + 0x8), __raw_readl(qgic_base + 0xC),
  1058. __raw_readl(qgic_base + 0x10), __raw_readl(qgic_base + 0x14),
  1059. __raw_readl(qgic_base + 0x18), __raw_readl(qgic_base + 0x1C));
  1060. qgic_base = gic_data_dist_base(gic_global) + GIC_DIST_ACTIVE_BIT;
  1061. pr_err("GIC_DIST_ACTIVE_BIT 0x%x 0x%x 0x%x 0x%x\n\t 0x%x 0x%x 0x%x 0x%x",
  1062. __raw_readl(qgic_base + 0x0), __raw_readl(qgic_base + 0x4),
  1063. __raw_readl(qgic_base + 0x8), __raw_readl(qgic_base + 0xC),
  1064. __raw_readl(qgic_base + 0x10), __raw_readl(qgic_base + 0x14),
  1065. __raw_readl(qgic_base + 0x18), __raw_readl(qgic_base + 0x1C));
  1066. pr_err("%s Dump QGIC registers done\n", __func__);
  1067. }
  1068. #endif