qpnp-int.c

/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/spmi.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/wakeup_reason.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/qpnp-int.h>

/* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
#define QPNPINT_NR_IRQS (16 * 256 * 8)
/* This value is guaranteed not to be valid for private data */
#define QPNPINT_INVALID_DATA 0x80000000

#ifdef CONFIG_SEC_PM_DEBUG
enum {
        MSM_QPNP_INT_DBG_DISABLED = 0,
        MSM_QPNP_INT_DBG_SHOW_IRQ = BIT(0),
};

int msm_qpnp_int_debug_mask = MSM_QPNP_INT_DBG_DISABLED;
module_param_named(
        debug_mask, msm_qpnp_int_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
#endif

enum qpnpint_regs {
        QPNPINT_REG_RT_STS = 0x10,
        QPNPINT_REG_SET_TYPE = 0x11,
        QPNPINT_REG_POLARITY_HIGH = 0x12,
        QPNPINT_REG_POLARITY_LOW = 0x13,
        QPNPINT_REG_LATCHED_CLR = 0x14,
        QPNPINT_REG_EN_SET = 0x15,
        QPNPINT_REG_EN_CLR = 0x16,
        QPNPINT_REG_LATCHED_STS = 0x18,
};

struct q_perip_data {
        uint8_t type;     /* bitmap */
        uint8_t pol_high; /* bitmap */
        uint8_t pol_low;  /* bitmap */
        uint8_t int_en;   /* bitmap */
        uint8_t use_count;
        spinlock_t lock;
};

struct q_irq_data {
        uint32_t priv_d; /* data to optimize arbiter interactions */
        struct q_chip_data *chip_d;
        struct q_perip_data *per_d;
        uint8_t mask_shift;
        uint8_t spmi_slave;
        uint16_t spmi_offset;
};

struct q_chip_data {
        int bus_nr;
        struct irq_domain *domain;
        struct qpnp_local_int *cb;
        struct spmi_controller *spmi_ctrl;
        struct radix_tree_root per_tree;
        struct list_head list;
};
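
/*
 * Data-structure overview: one q_chip_data exists per SPMI bus / irq_domain.
 * Its per_tree radix tree holds one q_perip_data per peripheral (keyed by the
 * hwirq with the low three interrupt bits masked off), and every mapped
 * interrupt gets its own q_irq_data that points back at the shared
 * q_perip_data of its peripheral.
 */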

static LIST_HEAD(qpnpint_chips);
static DEFINE_MUTEX(qpnpint_chips_mutex);

#define QPNPINT_MAX_BUSSES 4
struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];

/**
 * qpnpint_encode_hwirq - translate between qpnp_irq_spec and
 *                        hwirq representation.
 *
 * slave_offset = (addr->slave * 256 * 8);
 * perip_offset = slave_offset + (addr->perip * 8);
 * return perip_offset + addr->irq;
 */
static inline int qpnpint_encode_hwirq(struct qpnp_irq_spec *spec)
{
        uint32_t hwirq;

        if (spec->slave > 15 || spec->irq > 7)
                return -EINVAL;

        hwirq = (spec->slave << 11);
        hwirq |= (spec->per << 3);
        hwirq |= spec->irq;

        return hwirq;
}
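
/*
 * Encoding example (illustrative values): slave = 2, per = 0x40, irq = 3
 * encodes to hwirq = (2 << 11) | (0x40 << 3) | 3 = 0x1203; decoding 0x1203
 * below returns the same {slave, per, irq} triple.
 */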

/**
 * qpnpint_decode_hwirq - translate between hwirq and
 *                        qpnp_irq_spec representation.
 */
static inline int qpnpint_decode_hwirq(unsigned long hwirq,
                                       struct qpnp_irq_spec *spec)
{
        if (hwirq > 65535)
                return -EINVAL;

        spec->slave = (hwirq >> 11) & 0xF;
        spec->per = (hwirq >> 3) & 0xFF;
        spec->irq = hwirq & 0x7;
        return 0;
}

static int qpnpint_spmi_read(struct q_irq_data *irq_d, uint8_t reg,
                             void *buf, uint32_t len)
{
        struct q_chip_data *chip_d = irq_d->chip_d;

        if (!chip_d->spmi_ctrl)
                return -ENODEV;

        return spmi_ext_register_readl(chip_d->spmi_ctrl, irq_d->spmi_slave,
                                       irq_d->spmi_offset + reg, buf, len);
}

static int qpnpint_spmi_write(struct q_irq_data *irq_d, uint8_t reg,
                              void *buf, uint32_t len)
{
        struct q_chip_data *chip_d = irq_d->chip_d;
        int rc;

        if (!chip_d->spmi_ctrl)
                return -ENODEV;

        rc = spmi_ext_register_writel(chip_d->spmi_ctrl, irq_d->spmi_slave,
                                      irq_d->spmi_offset + reg, buf, len);
        return rc;
}

static int qpnpint_arbiter_op(struct irq_data *d,
                              struct q_irq_data *irq_d,
                              int (*arb_op)(struct spmi_controller *,
                                            struct qpnp_irq_spec *,
                                            uint32_t))
{
        struct q_chip_data *chip_d = irq_d->chip_d;
        struct qpnp_irq_spec q_spec;
        int rc;

        if (!arb_op)
                return 0;

        if (!chip_d->cb->register_priv_data) {
                pr_warn_ratelimited("No way to register arbiter private data\n");
                return -ENODEV;
        }

        rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
        if (rc) {
                pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
                                   __func__, d->hwirq);
                return rc;
        }

        if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
                rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
                                                    &q_spec, &irq_d->priv_d);
                if (rc) {
                        pr_err_ratelimited(
                                "%s: priv data registration failed on hwirq %lu rc = %d\n",
                                __func__, d->hwirq, rc);
                        return rc;
                }
        }

        arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);

        return 0;
}
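
/*
 * Note that the arbiter-level mask/unmask callbacks act on a whole
 * peripheral, not on a single interrupt bit; the irq_chip callbacks below
 * therefore only invoke them when the first interrupt of a peripheral is
 * enabled or the last one is disabled. priv_d is fetched from the arbiter
 * once and cached in q_irq_data for later calls.
 */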

static void qpnpint_irq_ack(struct irq_data *d)
{
        struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
        int rc;

        pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

        rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
                                &irq_d->mask_shift, 1);
        if (rc) {
                pr_err_ratelimited("spmi write failure on irq %d, rc=%d\n",
                                   d->irq, rc);
                return;
        }
}

static void qpnpint_irq_mask(struct irq_data *d)
{
        struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
        struct q_chip_data *chip_d = irq_d->chip_d;
        struct q_perip_data *per_d = irq_d->per_d;
        int rc;
        uint8_t prev_int_en;

        pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

        if (!chip_d->cb) {
                pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
                                    chip_d->bus_nr, irq_d->spmi_slave,
                                    irq_d->spmi_offset);
                return;
        }

        spin_lock(&per_d->lock);
        prev_int_en = per_d->int_en;
        per_d->int_en &= ~irq_d->mask_shift;
        if (prev_int_en && !(per_d->int_en)) {
                /*
                 * no interrupt on this peripheral is enabled
                 * ask the arbiter to ignore this peripheral
                 */
                qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
        }
        spin_unlock(&per_d->lock);

        rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
                                (u8 *)&irq_d->mask_shift, 1);
        if (rc) {
                pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
                return;
        }

        pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
}

static void qpnpint_irq_mask_ack(struct irq_data *d)
{
        pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

        qpnpint_irq_mask(d);
        qpnpint_irq_ack(d);
}

static void qpnpint_irq_unmask(struct irq_data *d)
{
        struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
        struct q_chip_data *chip_d = irq_d->chip_d;
        struct q_perip_data *per_d = irq_d->per_d;
        int rc;
        uint8_t buf[2];
        uint8_t prev_int_en;

        pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

        if (!chip_d->cb) {
                pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
                                    chip_d->bus_nr, irq_d->spmi_slave,
                                    irq_d->spmi_offset);
                return;
        }

        spin_lock(&per_d->lock);
        prev_int_en = per_d->int_en;
        per_d->int_en |= irq_d->mask_shift;
        if (!prev_int_en && per_d->int_en) {
                /*
                 * no interrupt prior to this call was enabled for the
                 * peripheral. Ask the arbiter to enable interrupts for
                 * this peripheral
                 */
                qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
        }
        spin_unlock(&per_d->lock);

        /* Check the current state of the interrupt enable bit. */
        rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_EN_SET, buf, 1);
        if (rc) {
                pr_err("SPMI read failure for IRQ %d, rc=%d\n", d->irq, rc);
                return;
        }

        if (!(buf[0] & irq_d->mask_shift)) {
                /*
                 * Since the interrupt is currently disabled, write to both the
                 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
                 * cannot be triggered when the interrupt is enabled.
                 */
                buf[0] = irq_d->mask_shift;
                buf[1] = irq_d->mask_shift;
                rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR, buf, 2);
                if (rc) {
                        pr_err("SPMI write failure for IRQ %d, rc=%d\n", d->irq,
                               rc);
                        return;
                }
        }
}
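
/*
 * The 2-byte write in qpnpint_irq_unmask() above starts at LATCHED_CLR
 * (0x14); since EN_SET (0x15) is the adjacent register, a single SPMI burst
 * clears any stale latched status and sets the enable bit, closing the
 * window for a spurious interrupt.
 */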

static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
        struct q_perip_data *per_d = irq_d->per_d;
        int rc;
        u8 buf[3];

        pr_debug("hwirq %lu irq: %d flow: 0x%x\n", d->hwirq,
                 d->irq, flow_type);

        per_d->pol_high &= ~irq_d->mask_shift;
        per_d->pol_low &= ~irq_d->mask_shift;
        if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
                per_d->type |= irq_d->mask_shift; /* edge trig */
                if (flow_type & IRQF_TRIGGER_RISING)
                        per_d->pol_high |= irq_d->mask_shift;
                if (flow_type & IRQF_TRIGGER_FALLING)
                        per_d->pol_low |= irq_d->mask_shift;
        } else {
                if ((flow_type & IRQF_TRIGGER_HIGH) &&
                    (flow_type & IRQF_TRIGGER_LOW))
                        return -EINVAL;
                per_d->type &= ~irq_d->mask_shift; /* level trig */
                if (flow_type & IRQF_TRIGGER_HIGH)
                        per_d->pol_high |= irq_d->mask_shift;
                else
                        per_d->pol_low |= irq_d->mask_shift;
        }

        buf[0] = per_d->type;
        buf[1] = per_d->pol_high;
        buf[2] = per_d->pol_low;

        rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, buf, 3);
        if (rc) {
                pr_err("spmi failure on irq %d\n", d->irq);
                return rc;
        }

        if (flow_type & IRQ_TYPE_EDGE_BOTH)
                __irq_set_handler_locked(d->irq, handle_edge_irq);
        else
                __irq_set_handler_locked(d->irq, handle_level_irq);

        return 0;
}
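
/*
 * Example (illustrative): for IRQF_TRIGGER_RISING, qpnpint_irq_set_type()
 * above sets the peripheral's bit in the type and pol_high bitmaps, leaves
 * it clear in pol_low, and writes all three bitmaps in one 3-byte SPMI burst
 * covering SET_TYPE (0x11), POLARITY_HIGH (0x12) and POLARITY_LOW (0x13).
 */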

static int qpnpint_irq_read_line(struct irq_data *d)
{
        struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
        int rc;
        u8 buf;

        pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

        rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_RT_STS, &buf, 1);
        if (rc) {
                pr_err("spmi failure on irq %d\n", d->irq);
                return rc;
        }

        return (buf & irq_d->mask_shift) ? 1 : 0;
}

static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
{
        return 0;
}

static struct irq_chip qpnpint_chip = {
        .name = "qpnp-int",
        .irq_ack = qpnpint_irq_ack,
        .irq_mask = qpnpint_irq_mask,
        .irq_mask_ack = qpnpint_irq_mask_ack,
        .irq_unmask = qpnpint_irq_unmask,
        .irq_set_type = qpnpint_irq_set_type,
        .irq_read_line = qpnpint_irq_read_line,
        .irq_set_wake = qpnpint_irq_set_wake,
        .flags = IRQCHIP_MASK_ON_SUSPEND,
};

static int qpnpint_init_irq_data(struct q_chip_data *chip_d,
                                 struct q_irq_data *irq_d,
                                 unsigned long hwirq)
{
        struct qpnp_irq_spec q_spec;
        int rc;

        irq_d->mask_shift = 1 << (hwirq & 0x7);
        rc = qpnpint_decode_hwirq(hwirq, &q_spec);
        if (rc < 0)
                return rc;
        irq_d->spmi_slave = q_spec.slave;
        irq_d->spmi_offset = q_spec.per << 8;
        irq_d->chip_d = chip_d;

        irq_d->priv_d = QPNPINT_INVALID_DATA;

        if (chip_d->cb && chip_d->cb->register_priv_data) {
                rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl, &q_spec,
                                                    &irq_d->priv_d);
                if (rc)
                        return rc;
        }

        irq_d->per_d->use_count++;
        return 0;
}
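
/*
 * spmi_offset above is the peripheral's register base: each peripheral owns
 * a 256-byte window, so the base is simply per_id << 8 and the QPNPINT_REG_*
 * offsets are added to it on every SPMI access.
 */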

static struct q_irq_data *qpnpint_alloc_irq_data(
                                                struct q_chip_data *chip_d,
                                                unsigned long hwirq)
{
        struct q_irq_data *irq_d;
        struct q_perip_data *per_d;
        int rc;

        irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
        if (!irq_d)
                return ERR_PTR(-ENOMEM);

        /*
         * The Peripheral Tree is keyed from the slave + per_id. We're
         * ignoring the irq bits here since this peripheral structure
         * should be common for all irqs on the same peripheral.
         */
        per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
        if (!per_d) {
                per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
                if (!per_d) {
                        rc = -ENOMEM;
                        goto alloc_fail;
                }
                spin_lock_init(&per_d->lock);
                rc = radix_tree_preload(GFP_KERNEL);
                if (rc)
                        goto alloc_fail;
                rc = radix_tree_insert(&chip_d->per_tree,
                                       (hwirq & ~0x7), per_d);
                /* end the preload section even if the insert failed */
                radix_tree_preload_end();
                if (rc)
                        goto alloc_fail;
        }
        irq_d->per_d = per_d;

        return irq_d;

alloc_fail:
        kfree(per_d);
        kfree(irq_d);
        return ERR_PTR(rc);
}

static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
                                           struct device_node *controller,
                                           const u32 *intspec,
                                           unsigned int intsize,
                                           unsigned long *out_hwirq,
                                           unsigned int *out_type)
{
        struct qpnp_irq_spec addr;
        int ret;

        pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
                 intspec[0], intspec[1], intspec[2]);

        if (d->of_node != controller)
                return -EINVAL;
        if (intsize != 3)
                return -EINVAL;

        addr.irq = intspec[2] & 0x7;
        addr.per = intspec[1] & 0xFF;
        addr.slave = intspec[0] & 0xF;

        ret = qpnpint_encode_hwirq(&addr);
        if (ret < 0) {
                pr_err("invalid intspec\n");
                return ret;
        }
        *out_hwirq = ret;
        *out_type = IRQ_TYPE_NONE;

        pr_debug("out_hwirq = %lu\n", *out_hwirq);

        return 0;
}
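
/*
 * Illustrative device-tree usage (values are arbitrary): a three-cell
 * specifier such as
 *
 *      interrupts = <0x0 0x61 0x1>;
 *
 * is translated above as slave 0x0, peripheral 0x61, interrupt 0x1, which
 * encodes to hwirq = (0x0 << 11) | (0x61 << 3) | 0x1 = 0x309.
 */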

static void qpnpint_free_irq_data(struct q_irq_data *irq_d)
{
        if (irq_d->per_d->use_count == 1)
                kfree(irq_d->per_d);
        else
                irq_d->per_d->use_count--;
        kfree(irq_d);
}

static int qpnpint_irq_domain_map(struct irq_domain *d,
                                  unsigned int virq, irq_hw_number_t hwirq)
{
        struct q_chip_data *chip_d = d->host_data;
        struct q_irq_data *irq_d;
        int rc;

        pr_debug("hwirq = %lu\n", hwirq);

        if (hwirq < 0 || hwirq >= QPNPINT_NR_IRQS) {
                pr_err("hwirq %lu out of bounds\n", hwirq);
                return -EINVAL;
        }

        irq_radix_revmap_insert(d, virq, hwirq);

        irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
        if (IS_ERR(irq_d)) {
                pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
                return PTR_ERR(irq_d);
        }

        rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
        if (rc) {
                pr_err("failed to init irq data for hwirq %lu\n", hwirq);
                goto map_err;
        }

        irq_set_chip_and_handler(virq,
                                 &qpnpint_chip,
                                 handle_level_irq);
        irq_set_chip_data(virq, irq_d);
#ifdef CONFIG_ARM
        set_irq_flags(virq, IRQF_VALID);
#else
        irq_set_noprobe(virq);
#endif
        return 0;

map_err:
        qpnpint_free_irq_data(irq_d);
        return rc;
}

void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
        struct q_irq_data *irq_d = irq_get_chip_data(virq);

        if (WARN_ON(!irq_d))
                return;

        qpnpint_free_irq_data(irq_d);
}

const struct irq_domain_ops qpnpint_irq_domain_ops = {
        .map = qpnpint_irq_domain_map,
        .unmap = qpnpint_irq_domain_unmap,
        .xlate = qpnpint_irq_domain_dt_translate,
};

int qpnpint_register_controller(struct device_node *node,
                                struct spmi_controller *ctrl,
                                struct qpnp_local_int *li_cb)
{
        struct q_chip_data *chip_d;

        if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES)
                return -EINVAL;

        list_for_each_entry(chip_d, &qpnpint_chips, list)
                if (node == chip_d->domain->of_node) {
                        chip_d->cb = kmemdup(li_cb,
                                             sizeof(*li_cb), GFP_ATOMIC);
                        if (!chip_d->cb)
                                return -ENOMEM;
                        chip_d->spmi_ctrl = ctrl;
                        chip_lookup[ctrl->nr] = chip_d;
                        return 0;
                }

        return -ENOENT;
}
EXPORT_SYMBOL(qpnpint_register_controller);
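
/*
 * Illustrative use (hypothetical arbiter driver): once the SPMI arbiter has
 * probed, it hands this layer its controller and local callbacks for the
 * arbiter's device-tree node, roughly:
 *
 *      static struct qpnp_local_int arb_cb = {
 *              .mask = arb_mask_peripheral,
 *              .unmask = arb_unmask_peripheral,
 *              .register_priv_data = arb_register_priv_data,
 *      };
 *      rc = qpnpint_register_controller(node, ctrl, &arb_cb);
 *
 * The arb_* callback names are placeholders; struct qpnp_local_int itself is
 * defined in <mach/qpnp-int.h>.
 */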

int qpnpint_unregister_controller(struct device_node *node)
{
        struct q_chip_data *chip_d;

        if (!node)
                return -EINVAL;

        list_for_each_entry(chip_d, &qpnpint_chips, list)
                if (node == chip_d->domain->of_node) {
                        kfree(chip_d->cb);
                        chip_d->cb = NULL;
                        if (chip_d->spmi_ctrl)
                                chip_lookup[chip_d->spmi_ctrl->nr] = NULL;
                        chip_d->spmi_ctrl = NULL;
                        return 0;
                }

        return -ENOENT;
}
EXPORT_SYMBOL(qpnpint_unregister_controller);

static int __qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
                                struct qpnp_irq_spec *spec,
                                bool show)
{
        struct irq_domain *domain;
        unsigned long hwirq, busno;
        int irq;

        if (!spec || !spmi_ctrl)
                return -EINVAL;

        pr_debug("spec slave = %u per = %u irq = %u\n",
                 spec->slave, spec->per, spec->irq);

        busno = spmi_ctrl->nr;
        if (busno >= QPNPINT_MAX_BUSSES)
                return -EINVAL;

        /*
         * Keep the encode result in a signed variable so a failed encode
         * (negative errno) is not lost in the unsigned hwirq.
         */
        irq = qpnpint_encode_hwirq(spec);
        if (irq < 0) {
                pr_err("invalid irq spec passed\n");
                return -EINVAL;
        }
        hwirq = irq;

        domain = chip_lookup[busno]->domain;
        irq = irq_radix_revmap_lookup(domain, hwirq);

#ifdef CONFIG_SEC_PM_DEBUG
        if (msm_qpnp_int_debug_mask & MSM_QPNP_INT_DBG_SHOW_IRQ) {
                struct irq_desc *desc;
                const char *name = "null";

                desc = irq_to_desc(irq);
                if (desc == NULL)
                        name = "stray irq";
                else if (desc->action && desc->action->name)
                        name = desc->action->name;

                pr_info("%d triggered [0x%01x, 0x%02x,0x%01x] %s\n",
                        irq, spec->slave, spec->per, spec->irq, name);
        }
#endif

        if (show) {
                struct irq_desc *desc;
                const char *name = "null";

                desc = irq_to_desc(irq);
                if (desc == NULL)
                        name = "stray irq";
                else if (desc->action && desc->action->name)
                        name = desc->action->name;

                pr_info("%d triggered [0x%01x, 0x%02x,0x%01x] %s\n",
                        irq, spec->slave, spec->per, spec->irq, name);
#ifdef CONFIG_SEC_PM_DEBUG
                log_wakeup_reason(irq);
                update_wakeup_reason_stats(irq);
#endif
        } else {
                generic_handle_irq(irq);
        }

        return 0;
}
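
/*
 * Interrupt flow: the SPMI arbiter decodes its status registers into a
 * qpnp_irq_spec and calls qpnpint_handle_irq() below; the spec is re-encoded
 * into a hwirq, mapped back to a Linux virq through the domain's radix
 * revmap, and dispatched with generic_handle_irq(). qpnpint_show_irq() walks
 * the same path but only logs the interrupt (and, under CONFIG_SEC_PM_DEBUG,
 * records it as a wakeup reason) instead of dispatching it.
 */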

int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
                       struct qpnp_irq_spec *spec)
{
        return __qpnpint_handle_irq(spmi_ctrl, spec, false);
}
EXPORT_SYMBOL(qpnpint_handle_irq);

int qpnpint_show_irq(struct spmi_controller *spmi_ctrl,
                     struct qpnp_irq_spec *spec)
{
        return __qpnpint_handle_irq(spmi_ctrl, spec, true);
}
EXPORT_SYMBOL(qpnpint_show_irq);

int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
{
        struct q_chip_data *chip_d;

        chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
        if (!chip_d)
                return -ENOMEM;

        chip_d->domain = irq_domain_add_tree(node,
                                             &qpnpint_irq_domain_ops, chip_d);
        if (!chip_d->domain) {
                pr_err("Unable to allocate irq_domain\n");
                kfree(chip_d);
                return -ENOMEM;
        }

        INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
        list_add(&chip_d->list, &qpnpint_chips);

        return 0;
}
EXPORT_SYMBOL(qpnpint_of_init);
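
/*
 * qpnpint_of_init() matches the of_irq_init() callback signature, so a
 * hypothetical sketch of wiring it up from platform init code (the
 * compatible string below is a placeholder) would be:
 *
 *      static const struct of_device_id qpnpint_match[] __initconst = {
 *              { .compatible = "qcom,spmi-pmic-arb", .data = qpnpint_of_init },
 *              { }
 *      };
 *
 *      void __init board_init_irq(void)
 *      {
 *              ...
 *              of_irq_init(qpnpint_match);
 *      }
 */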