  1. /*
  2. * regmap based irq_chip
  3. *
  4. * Copyright 2011 Wolfson Microelectronics plc
  5. *
  6. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/device.h>
  13. #include <linux/export.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/irq.h>
  16. #include <linux/irqdomain.h>
  17. #include <linux/pm_runtime.h>
  18. #include <linux/regmap.h>
  19. #include <linux/slab.h>
  20. #include "internal.h"
/* Runtime state for one regmap based interrupt controller instance. */
struct regmap_irq_chip_data {
	struct mutex lock;	/* held from irq_bus_lock to irq_bus_sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */
	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static chip description */
	int irq_base;		/* base of legacy irq range, 0 for linear domain */
	struct irq_domain *domain;
	int irq;		/* primary (chip-level) interrupt */
	int wake_count;		/* net wake enables to propagate to the parent */
	void *status_reg_buf;	/* raw buffer for bulk status reads, may be NULL */
	unsigned int *status_buf;	/* unpacked status, one word per register */
	unsigned int *mask_buf;		/* cached mask state, flushed on unlock */
	unsigned int *mask_buf_def;	/* all valid irq bits for each register */
	unsigned int *wake_buf;		/* wake enables, NULL if no wake_base */
	unsigned int *type_buf;		/* cached trigger-type state */
	unsigned int *type_buf_def;	/* all valid type bits per type register */
	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};
  40. static inline const
  41. struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
  42. int irq)
  43. {
  44. return &data->chip->irqs[irq];
  45. }
  46. static void regmap_irq_lock(struct irq_data *data)
  47. {
  48. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  49. mutex_lock(&d->lock);
  50. }
/*
 * irq_bus_sync_unlock callback: write all cached mask/wake/type changes
 * back to the hardware and release the lock taken in regmap_irq_lock().
 * Runs in sleepable context, so regmap I/O and runtime PM are allowed.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted sense: set bits mean enabled */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;

		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Flush any pending trigger-type changes */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
  160. static void regmap_irq_enable(struct irq_data *data)
  161. {
  162. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  163. struct regmap *map = d->map;
  164. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  165. d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
  166. }
  167. static void regmap_irq_disable(struct irq_data *data)
  168. {
  169. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  170. struct regmap *map = d->map;
  171. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  172. d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
  173. }
  174. static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
  175. {
  176. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  177. struct regmap *map = d->map;
  178. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  179. int reg = irq_data->type_reg_offset / map->reg_stride;
  180. if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
  181. return 0;
  182. d->type_buf[reg] &= ~(irq_data->type_falling_mask |
  183. irq_data->type_rising_mask);
  184. switch (type) {
  185. case IRQ_TYPE_EDGE_FALLING:
  186. d->type_buf[reg] |= irq_data->type_falling_mask;
  187. break;
  188. case IRQ_TYPE_EDGE_RISING:
  189. d->type_buf[reg] |= irq_data->type_rising_mask;
  190. break;
  191. case IRQ_TYPE_EDGE_BOTH:
  192. d->type_buf[reg] |= (irq_data->type_falling_mask |
  193. irq_data->type_rising_mask);
  194. break;
  195. default:
  196. return -EINVAL;
  197. }
  198. return 0;
  199. }
  200. static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
  201. {
  202. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  203. struct regmap *map = d->map;
  204. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  205. if (on) {
  206. if (d->wake_buf)
  207. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  208. &= ~irq_data->mask;
  209. d->wake_count++;
  210. } else {
  211. if (d->wake_buf)
  212. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  213. |= irq_data->mask;
  214. d->wake_count--;
  215. }
  216. return 0;
  217. }
/*
 * Template irq_chip, copied into each regmap_irq_chip_data so that the
 * name can be set per instance.  All register I/O is deferred to the
 * bus lock/sync_unlock pair because regmap accesses may sleep.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock = regmap_irq_lock,
	.irq_bus_sync_unlock = regmap_irq_sync_unlock,
	.irq_disable = regmap_irq_disable,
	.irq_enable = regmap_irq_enable,
	.irq_set_type = regmap_irq_set_type,
	.irq_set_wake = regmap_irq_set_wake,
};
  226. static irqreturn_t regmap_irq_thread(int irq, void *d)
  227. {
  228. struct regmap_irq_chip_data *data = d;
  229. const struct regmap_irq_chip *chip = data->chip;
  230. struct regmap *map = data->map;
  231. int ret, i;
  232. bool handled = false;
  233. u32 reg;
  234. if (chip->handle_pre_irq)
  235. chip->handle_pre_irq(chip->irq_drv_data);
  236. if (chip->runtime_pm) {
  237. ret = pm_runtime_get_sync(map->dev);
  238. if (ret < 0) {
  239. dev_err(map->dev, "IRQ thread failed to resume: %d\n",
  240. ret);
  241. pm_runtime_put(map->dev);
  242. goto exit;
  243. }
  244. }
  245. /*
  246. * Read in the statuses, using a single bulk read if possible
  247. * in order to reduce the I/O overheads.
  248. */
  249. if (!map->use_single_read && map->reg_stride == 1 &&
  250. data->irq_reg_stride == 1) {
  251. u8 *buf8 = data->status_reg_buf;
  252. u16 *buf16 = data->status_reg_buf;
  253. u32 *buf32 = data->status_reg_buf;
  254. BUG_ON(!data->status_reg_buf);
  255. ret = regmap_bulk_read(map, chip->status_base,
  256. data->status_reg_buf,
  257. chip->num_regs);
  258. if (ret != 0) {
  259. dev_err(map->dev, "Failed to read IRQ status: %d\n",
  260. ret);
  261. goto exit;
  262. }
  263. for (i = 0; i < data->chip->num_regs; i++) {
  264. switch (map->format.val_bytes) {
  265. case 1:
  266. data->status_buf[i] = buf8[i];
  267. break;
  268. case 2:
  269. data->status_buf[i] = buf16[i];
  270. break;
  271. case 4:
  272. data->status_buf[i] = buf32[i];
  273. break;
  274. default:
  275. BUG();
  276. goto exit;
  277. }
  278. }
  279. } else {
  280. for (i = 0; i < data->chip->num_regs; i++) {
  281. ret = regmap_read(map, chip->status_base +
  282. (i * map->reg_stride
  283. * data->irq_reg_stride),
  284. &data->status_buf[i]);
  285. if (ret != 0) {
  286. dev_err(map->dev,
  287. "Failed to read IRQ status: %d\n",
  288. ret);
  289. if (chip->runtime_pm)
  290. pm_runtime_put(map->dev);
  291. goto exit;
  292. }
  293. }
  294. }
  295. /*
  296. * Ignore masked IRQs and ack if we need to; we ack early so
  297. * there is no race between handling and acknowleding the
  298. * interrupt. We assume that typically few of the interrupts
  299. * will fire simultaneously so don't worry about overhead from
  300. * doing a write per register.
  301. */
  302. for (i = 0; i < data->chip->num_regs; i++) {
  303. data->status_buf[i] &= ~data->mask_buf[i];
  304. if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
  305. reg = chip->ack_base +
  306. (i * map->reg_stride * data->irq_reg_stride);
  307. ret = regmap_write(map, reg, data->status_buf[i]);
  308. if (ret != 0)
  309. dev_err(map->dev, "Failed to ack 0x%x: %d\n",
  310. reg, ret);
  311. }
  312. }
  313. for (i = 0; i < chip->num_irqs; i++) {
  314. if (data->status_buf[chip->irqs[i].reg_offset /
  315. map->reg_stride] & chip->irqs[i].mask) {
  316. handle_nested_irq(irq_find_mapping(data->domain, i));
  317. handled = true;
  318. }
  319. }
  320. if (chip->runtime_pm)
  321. pm_runtime_put(map->dev);
  322. exit:
  323. if (chip->handle_post_irq)
  324. chip->handle_post_irq(chip->irq_drv_data);
  325. if (handled)
  326. return IRQ_HANDLED;
  327. else
  328. return IRQ_NONE;
  329. }
  330. static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
  331. irq_hw_number_t hw)
  332. {
  333. struct regmap_irq_chip_data *data = h->host_data;
  334. irq_set_chip_data(virq, data);
  335. irq_set_chip(virq, &data->irq_chip);
  336. irq_set_nested_thread(virq, 1);
  337. irq_set_parent(virq, data->irq);
  338. irq_set_noprobe(virq);
  339. return 0;
  340. }
/* Two-cell DT bindings: first cell is the hwirq number, second the flags. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map = regmap_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate a specific irq_base if >= 0, pass 0 for dynamic
 *	      allocation via a linear irq_domain.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Every declared irq must land inside one of the chip's registers */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	/*
	 * NOTE(review): irq descriptors allocated above are not freed on
	 * the error paths below -- confirm whether that leak is intended.
	 */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Bulk status reads need a raw buffer in the device's value format */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect all valid irq bits for each mask register */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
						d->mask_buf_def[i],
						0);
			else
				ret = regmap_update_bits(map, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Reset all trigger types to the hardware default */
	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped virtual interrupts on the
 * chip before removing its irq_domain.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
  615. static void devm_regmap_irq_chip_release(struct device *dev, void *res)
  616. {
  617. struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
  618. regmap_del_irq_chip(d->irq, d);
  619. }
  620. static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
  621. {
  622. struct regmap_irq_chip_data **r = res;
  623. if (!r || !*r) {
  624. WARN_ON(!r || !*r);
  625. return 0;
  626. }
  627. return *r == data;
  628. }
  629. /**
  630. * devm_regmap_add_irq_chip(): Resource manager regmap_add_irq_chip()
  631. *
  632. * @dev: The device pointer on which irq_chip belongs to.
  633. * @map: The regmap for the device.
  634. * @irq: The IRQ the device uses to signal interrupts
  635. * @irq_flags: The IRQF_ flags to use for the primary interrupt.
  636. * @chip: Configuration for the interrupt controller.
  637. * @data: Runtime data structure for the controller, allocated on success
  638. *
  639. * Returns 0 on success or an errno on failure.
  640. *
  641. * The regmap_irq_chip data automatically be released when the device is
  642. * unbound.
  643. */
  644. int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
  645. int irq_flags, int irq_base,
  646. const struct regmap_irq_chip *chip,
  647. struct regmap_irq_chip_data **data)
  648. {
  649. struct regmap_irq_chip_data **ptr, *d;
  650. int ret;
  651. ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
  652. GFP_KERNEL);
  653. if (!ptr)
  654. return -ENOMEM;
  655. ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
  656. chip, &d);
  657. if (ret < 0) {
  658. devres_free(ptr);
  659. return ret;
  660. }
  661. *ptr = d;
  662. devres_add(dev, ptr);
  663. *data = d;
  664. return 0;
  665. }
  666. EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
  667. /**
  668. * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
  669. *
  670. * @dev: Device for which which resource was allocated.
  671. * @irq: Primary IRQ for the device
  672. * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
  673. */
  674. void devm_regmap_del_irq_chip(struct device *dev, int irq,
  675. struct regmap_irq_chip_data *data)
  676. {
  677. int rc;
  678. WARN_ON(irq != data->irq);
  679. rc = devres_release(dev, devm_regmap_irq_chip_release,
  680. devm_regmap_irq_chip_match, data);
  681. if (rc != 0)
  682. WARN_ON(rc);
  683. }
  684. EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.  Only meaningful when
 * the chip was registered with a non-zero irq_base (legacy domain).
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	/* Zero means a linear domain was used; there is no fixed base */
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 *
 * Useful for drivers to request their own IRQs.  Returns -EINVAL for
 * holes in the IRQ table, otherwise the virtual irq number.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
  714. /**
  715. * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
  716. *
  717. * Useful for drivers to request their own IRQs and for integration
  718. * with subsystems. For ease of integration NULL is accepted as a
  719. * domain, allowing devices to just call this even if no domain is
  720. * allocated.
  721. *
  722. * @data: regmap_irq controller to operate on.
  723. */
  724. struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
  725. {
  726. if (data)
  727. return data->domain;
  728. else
  729. return NULL;
  730. }
  731. EXPORT_SYMBOL_GPL(regmap_irq_get_domain);