wcd9xxx-irq.c

/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/core-resource.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <mach/cpuidle.h>
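
/*
 * The codec exposes its interrupt status/mask/clear state as banks of
 * byte-wide registers: irq n lives in register BIT_BYTE(n) at bit
 * position (n % BITS_PER_BYTE).
 */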
#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100
#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
	struct irq_domain *domain;
	int irq;
};
#endif
static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
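
/*
 * irq_bus_lock/irq_bus_sync_unlock pair: the genirq core takes the bus
 * lock around enable/disable calls so that the (potentially sleeping)
 * register writes can be batched; wcd9xxx_irq_sync_unlock() below flushes
 * any mask changes to the codec before dropping the lock.
 */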
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	mutex_lock(&wcd9xxx_res->irq_lock);
}
static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int i;

	if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
			WCD9XXX_MAX_IRQ_REGS) ||
	    (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
			WCD9XXX_MAX_IRQ_REGS)) {
		pr_err("%s: Array size out of bounds\n", __func__);
		goto unlock;
	}
	if (!wcd9xxx_res->codec_reg_write) {
		pr_err("%s: Codec reg write callback function not defined\n",
		       __func__);
		goto unlock;
	}

	for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
		/*
		 * If there's been a change in the mask, write it back to
		 * the hardware.
		 */
		if (wcd9xxx_res->irq_masks_cur[i] !=
		    wcd9xxx_res->irq_masks_cache[i]) {
			wcd9xxx_res->irq_masks_cache[i] =
					wcd9xxx_res->irq_masks_cur[i];
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MASK0 + i,
					wcd9xxx_res->irq_masks_cur[i]);
		}
	}
unlock:
	/* Always drop the bus lock taken in wcd9xxx_irq_lock() */
	mutex_unlock(&wcd9xxx_res->irq_lock);
}
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);

	wcd9xxx_res->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] &=
			~(BYTE_BIT_MASK(wcd9xxx_irq));
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);

	wcd9xxx_res->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] |=
			BYTE_BIT_MASK(wcd9xxx_irq);
}
static void wcd9xxx_irq_mask(struct irq_data *d)
{
	/*
	 * Do nothing; an empty stub is required because the genirq core
	 * calls irq_mask without a NULL check.
	 */
}
static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
	.irq_mask = wcd9xxx_irq_mask,
};
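
/*
 * Sleep/wake accounting: the first wcd9xxx_lock_sleep() holder raises the
 * pm_qos request to keep the CPU out of deep idle and moves pm_state from
 * WCD9XXX_PM_SLEEPABLE to WCD9XXX_PM_AWAKE; the last holder reverses both
 * in wcd9xxx_unlock_sleep().
 */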
bool wcd9xxx_lock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock,unlock}_sleep are called mostly by wcd9xxx_irq_thread
	 * and its subroutines, but btn0_lpress_fn is not a subroutine of
	 * wcd9xxx_irq_thread and can race with it, so accesses to
	 * wlock_holders must be protected by the mutex.
	 *
	 * If the system hasn't resumed yet, simply return false so the codec
	 * driver's IRQ handler can return without handling the IRQ.  As the
	 * interrupt line is still active, the codec will raise another IRQ
	 * shortly for a retry.
	 */
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						WCD9XXX_PM_SLEEPABLE,
						WCD9XXX_PM_AWAKE)) ==
						WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__, WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS,
			wcd9xxx_res->pm_state, wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}

	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
void wcd9xxx_unlock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (--wcd9xxx_res->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, wcd9xxx_res->pm_state,
			 WCD9XXX_PM_SLEEPABLE);
		/*
		 * If wcd9xxx_lock_sleep failed, pm_state would still be
		 * WCD9XXX_PM_ASLEEP; don't overwrite it.
		 */
		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);
	wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);
void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
				 struct intr_data *irqdata)
{
	int irqbit = irqdata->intr_num;

	if (!wcd9xxx_res->codec_reg_write) {
		pr_err("%s: codec read/write callback not defined\n",
		       __func__);
		return;
	}

	wcd9xxx_nested_irq_lock(wcd9xxx_res);
	if (irqdata->clear_first) {
		/* Clear the interrupt before running the nested handler */
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit),
				BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
	} else {
		/* Run the nested handler, then clear the interrupt */
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit),
				BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
	}
	wcd9xxx_nested_irq_unlock(wcd9xxx_res);
}
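
/*
 * Threaded handler for the codec's single upstream irq line: block system
 * suspend, read the status registers in bulk, mask off disabled sources,
 * dispatch nested handlers in interrupt-table order, and clear anything
 * left unhandled as a failsafe.
 */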
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	if (unlikely(!wcd9xxx_lock_sleep(wcd9xxx_res))) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec bulk register read callback not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
					   WCD9XXX_A_INTR_STATUS0,
					   num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
	memcpy(status1, status, sizeof(status1));

	/*
	 * Find out which interrupts were triggered and call their handlers.
	 *
	 * The codec has a single hardware irq line shared by all of its
	 * internal interrupts, so several status bits may be pending at
	 * once.  Dispatch the nested handlers in the order defined by the
	 * interrupt table rather than in raw bit order.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
		    BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe, clear unhandled irqs to prevent an interrupt storm.
	 * Note that irqs may be declared unhandled only when none of them
	 * were handled, since Taiko can route some irqs to the QDSP; the
	 * driver must not clear pending irqs when some were handled and
	 * others were not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}
		memset(status, 0xff, num_irq_regs);
		ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
						    WCD9XXX_A_INTR_CLEAR0,
						    num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
						     WCD9XXX_A_INTR_MODE,
						     0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	dev_err(wcd9xxx_res->dev, "Disable irq %d\n", wcd9xxx_res->irq);
	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}
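
/* Thin wrappers translating codec irq numbers to virqs for client drivers */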
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
		      int irq, void *data)
{
	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}

void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}

void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}

void wcd9xxx_disable_irq_sync(struct wcd9xxx_core_resource *wcd9xxx_res,
			      int irq)
{
	disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
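
/*
 * Map each codec interrupt to its virq, attach the wcd9xxx irq_chip with a
 * level or edge flow handler, and mark the virq nested so client handlers
 * run from the upstream irq thread.
 */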
static int wcd9xxx_irq_setup_downstream_irq(
				struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	pr_debug("%s: enter\n", __func__);

	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		/* Map OF irq */
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		if (virq == NO_IRQ) {
			pr_err("%s: No interrupt specifier for irq %d\n",
			       __func__, irq);
			return NO_IRQ;
		}

		ret = irq_set_chip_data(virq, wcd9xxx_res);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return ret;
		}

		if (wcd9xxx_res->irq_level_high[irq])
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_edge_irq);
		irq_set_nested_thread(virq, 1);
	}

	pr_debug("%s: leave\n", __func__);
	return 0;
}
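
/*
 * Called from the codec core at probe time; returns -EPROBE_DEFER until
 * the upstream irq is available (on CONFIG_OF builds it is published by
 * the wcd9xxx_intc platform driver below).
 */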
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int i, ret;
	u8 irq_level[wcd9xxx_res->num_irq_regs];

	mutex_init(&wcd9xxx_res->irq_lock);
	mutex_init(&wcd9xxx_res->nested_irq_lock);

	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
	if (!wcd9xxx_res->irq) {
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

	/* Setup downstream IRQs */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return ret;
	}

	/* Irq 0 is level triggered; all other wcd9xxx interrupts are edge
	 * triggered */
	wcd9xxx_res->irq_level_high[0] = true;

	/* Mask all the interrupts */
	memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |=
			wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
	}

	if (!wcd9xxx_res->codec_reg_write) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec register write callback not defined\n",
			__func__);
		ret = -EINVAL;
		goto fail_irq_init;
	}

	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
		/* Initialize interrupt mask and level registers */
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					     WCD9XXX_A_INTR_LEVEL0 + i,
					     irq_level[i]);
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					     WCD9XXX_A_INTR_MASK0 + i,
					     wcd9xxx_res->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret != 0) {
		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx_res->irq, ret);
	} else {
		ret = enable_irq_wake(wcd9xxx_res->irq);
		if (ret) {
			dev_err(wcd9xxx_res->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx_res->irq, ret);
			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		}
	}
	if (ret)
		goto fail_irq_init;

	return ret;

fail_irq_init:
	dev_err(wcd9xxx_res->dev,
		"%s: Failed to init wcd9xxx irq\n", __func__);
	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
	return ret;
}
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
			int irq, irq_handler_t handler,
			const char *name, void *data)
{
	int virq;

	virq = phyirq_to_virq(wcd9xxx_res, irq);

	/*
	 * ARM needs us to explicitly flag the IRQ as valid and will set it
	 * noprobe when we do so.
	 */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	set_irq_noprobe(virq);
#endif

	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
				    name, data);
}
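
/*
 * Typical client usage (a sketch only; the irq id, handler, and cookie
 * below are hypothetical):
 *
 *	ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_MBHC_INSERTION,
 *				  plug_detect_handler, "Plug detect", priv);
 *	if (ret)
 *		return ret;
 *	...
 *	wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_MBHC_INSERTION, priv);
 */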
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
		wcd9xxx_res->irq);

	if (wcd9xxx_res->irq) {
		disable_irq_wake(wcd9xxx_res->irq);
		free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		/* Release the parent's of_node */
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	}
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}
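
/*
 * Two mapping back-ends follow: without device tree, codec irqs are a flat
 * range starting at irq_base; with CONFIG_OF they go through a linear irq
 * domain owned by the wcd9xxx_intc device node.
 */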
#ifndef CONFIG_OF
static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res,
			  int offset)
{
	return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res,
			  int virq)
{
	return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
				struct wcd9xxx_core_resource *wcd9xxx_res)
{
	return wcd9xxx_res->irq;
}

static void wcd9xxx_irq_put_upstream_irq(
				struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Do nothing */
}

static int wcd9xxx_map_irq(
		struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
	return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
int __init wcd9xxx_irq_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * The wcd9xxx_intc interrupt controller supports an N-to-N irq
	 * mapping with a single-cell binding carrying irq numbers (offsets)
	 * only.  Use irq_domain_simple_ops, which provides
	 * irq_domain_simple_map and irq_domain_xlate_onetwocell.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (!data->domain) {
		kfree(data);
		return -ENOMEM;
	}

	return 0;
}
static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct device_node *pnode;
	struct irq_domain *domain;

	pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
	/* Shouldn't happen */
	if (unlikely(!pnode))
		return NULL;

	domain = irq_find_host(pnode);
	if (unlikely(!domain))
		return NULL;

	return (struct wcd9xxx_irq_drv_data *)domain->host_data;
}
static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res,
			  int offset)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_warn("%s: not registered to interrupt controller\n",
			__func__);
		return -EINVAL;
	}
	return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (unlikely(!irq_data)) {
		pr_err("%s: irq_data is NULL\n", __func__);
		return -EINVAL;
	}
	return irq_data->hwirq;
}
static unsigned int wcd9xxx_irq_get_upstream_irq(
				struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct wcd9xxx_irq_drv_data *data;

	/* Hold the parent's of_node */
	if (!of_node_get(of_irq_find_parent(wcd9xxx_res->dev->of_node)))
		return 0;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_err("%s: interrupt controller is not registered\n",
		       __func__);
		return 0;
	}

	/* Pairs with the wmb() in wcd9xxx_irq_probe() */
	rmb();
	return data->irq;
}
static void wcd9xxx_irq_put_upstream_irq(
				struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Release the parent's of_node */
	of_node_put(of_irq_find_parent(wcd9xxx_res->dev->of_node));
}
static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}
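
/*
 * wcd9xxx_intc platform driver: probe publishes the upstream irq through
 * the domain's host_data (the wmb() below pairs with the rmb() in
 * wcd9xxx_irq_get_upstream_irq()).
 */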
static int __devinit wcd9xxx_irq_probe(struct platform_device *pdev)
{
	int irq;
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	irq = platform_get_irq_byname(pdev, "cdc-int");
	if (irq < 0) {
		dev_err(&pdev->dev, "%s: Couldn't find cdc-int node (%d)\n",
			__func__, irq);
		return -EINVAL;
	}
	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);

	domain = irq_find_host(pdev->dev.of_node);
	if (unlikely(!domain)) {
		pr_err("%s: domain is NULL\n", __func__);
		return -EINVAL;
	}
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = irq;
	wmb();

	return 0;
}
static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	domain = irq_find_host(pdev->dev.of_node);
	if (unlikely(!domain)) {
		pr_err("%s: domain is NULL\n", __func__);
		return -EINVAL;
	}
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = 0;
	wmb();

	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "qcom,wcd9xxx-irq" },
	{ }
};

static struct platform_driver wcd9xxx_irq_driver = {
	.probe = wcd9xxx_irq_probe,
	.remove = wcd9xxx_irq_remove,
	.driver = {
		.name = "wcd9xxx_intc",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(of_match),
	},
};

static int wcd9xxx_irq_drv_init(void)
{
	return platform_driver_register(&wcd9xxx_irq_driver);
}
subsys_initcall(wcd9xxx_irq_drv_init);

static void wcd9xxx_irq_drv_exit(void)
{
	platform_driver_unregister(&wcd9xxx_irq_driver);
}
module_exit(wcd9xxx_irq_drv_exit);
#endif /* CONFIG_OF */