/* drivers/iio/industrialio-trigger.c */
  1. /* The industrial I/O core, trigger handling functions
  2. *
  3. * Copyright (c) 2008 Jonathan Cameron
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/idr.h>
  11. #include <linux/err.h>
  12. #include <linux/device.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/list.h>
  15. #include <linux/slab.h>
  16. #include <linux/iio/iio.h>
  17. #include <linux/iio/trigger.h>
  18. #include "iio_core.h"
  19. #include "iio_core_trigger.h"
  20. #include <linux/iio/trigger_consumer.h>
  21. /* RFC - Question of approach
  22. * Make the common case (single sensor single trigger)
  23. * simple by starting trigger capture from when first sensors
  24. * is added.
  25. *
  26. * Complex simultaneous start requires use of 'hold' functionality
  27. * of the trigger. (not implemented)
  28. *
  29. * Any other suggestions?
  30. */
/* Allocator for the unique id used in each trigger's "trigger%d" sysfs name */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
/* Protects iio_trigger_list against concurrent register/unregister/lookup */
static DEFINE_MUTEX(iio_trigger_list_lock);
/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}
/* sysfs attributes exposed on every trigger device (currently only "name") */
static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

/* Forward declaration: used by iio_trigger_register() to reject duplicates */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
/**
 * iio_trigger_register() - register a trigger with the IIO core
 * @trig_info:	trigger to register; must have its ops set
 *
 * Allocates an id, adds the trigger's device to sysfs and inserts the
 * trigger into the global list so consumers can find it by name.
 *
 * Return: 0 on success or a negative error code (-EINVAL if ops is
 * missing, -EEXIST if a trigger with the same name already exists).
 */
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	/* trig_info->ops is required for the module member */
	if (!trig_info->ops)
		return -EINVAL;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	/*
	 * Duplicate check and list insertion happen under the same lock,
	 * so the check is atomic w.r.t. concurrent registrations.
	 */
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	/* Drop the lock before device_del() to avoid holding it across it */
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
/**
 * iio_trigger_unregister() - remove a trigger registered with
 *			      iio_trigger_register()
 * @trig_info:	trigger to remove
 *
 * Removes the trigger from the global list, frees its id and deletes
 * the sysfs device.  Does not free the trigger itself; the final
 * reference drop does that via the device release callback.
 */
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
/**
 * iio_trigger_set_immutable() - pin a device to one specific trigger
 * @indio_dev:	IIO device the trigger is assigned to
 * @trig:	trigger to assign
 *
 * Takes a reference on @trig, stores it as the device's current trigger
 * and marks it read-only so userspace cannot change current_trigger.
 * Warns if the device was already marked read-only.
 *
 * Return: 0 on success, -EINVAL if either argument is NULL.
 */
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	if (!indio_dev || !trig)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	WARN_ON(indio_dev->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	indio_dev->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
  114. /* Search for trigger by name, assuming iio_trigger_list_lock held */
  115. static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
  116. {
  117. struct iio_trigger *iter;
  118. list_for_each_entry(iter, &iio_trigger_list, list)
  119. if (!strcmp(iter->name, name))
  120. return iter;
  121. return NULL;
  122. }
  123. static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
  124. {
  125. struct iio_trigger *trig = NULL, *iter;
  126. mutex_lock(&iio_trigger_list_lock);
  127. list_for_each_entry(iter, &iio_trigger_list, list)
  128. if (sysfs_streq(iter->name, name)) {
  129. trig = iter;
  130. iio_trigger_get(trig);
  131. break;
  132. }
  133. mutex_unlock(&iio_trigger_list_lock);
  134. return trig;
  135. }
/**
 * iio_trigger_poll() - called on a trigger occurrence (hard irq context)
 * @trig:	trigger which occurred
 *
 * Fires every enabled consumer sub-irq.  use_count is primed to the
 * maximum number of consumers and decremented once per consumer via
 * iio_trigger_notify_done(); disabled slots are "completed" immediately
 * so the count balances.  A non-zero use_count means a previous poll is
 * still in flight and this occurrence is dropped.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				/* account for the unused slot immediately */
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

/* Convenience irq handler for data-ready style interrupt lines */
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
/**
 * iio_trigger_poll_chained() - as iio_trigger_poll() but from thread context
 * @trig:	trigger which occurred
 *
 * Same accounting as iio_trigger_poll(), but dispatches the consumer
 * handlers with handle_nested_irq() so it may be called from a threaded
 * (sleepable) context.
 */
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

/**
 * iio_trigger_notify_done() - a consumer finished processing this occurrence
 * @trig:	trigger being completed
 *
 * When the last consumer completes, optionally re-enables the trigger
 * via ops->try_reenable; a non-zero return from that callback means an
 * interrupt was missed, so a fresh poll is launched immediately.
 */
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */

/*
 * Allocate one consumer slot from the trigger's irq pool.
 * Returns the virtual irq number, or a negative error when the pool
 * is exhausted.  ilog2(1) == 0, i.e. a single-bit (order 0) region.
 */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

/* Return a consumer slot previously obtained via iio_trigger_get_irq() */
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}
  197. /* Complexity in here. With certain triggers (datardy) an acknowledgement
  198. * may be needed if the pollfuncs do not include the data read for the
  199. * triggering device.
  200. * This is not currently handled. Alternative of not enabling trigger unless
  201. * the relevant function is in there may be the best option.
  202. */
  203. /* Worth protecting against double additions? */
  204. static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
  205. struct iio_poll_func *pf)
  206. {
  207. int ret = 0;
  208. bool notinuse
  209. = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
  210. /* Prevent the module from being removed whilst attached to a trigger */
  211. __module_get(pf->indio_dev->info->driver_module);
  212. /* Get irq number */
  213. pf->irq = iio_trigger_get_irq(trig);
  214. if (pf->irq < 0)
  215. goto out_put_module;
  216. /* Request irq */
  217. ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
  218. pf->type, pf->name,
  219. pf);
  220. if (ret < 0)
  221. goto out_put_irq;
  222. /* Enable trigger in driver */
  223. if (trig->ops->set_trigger_state && notinuse) {
  224. ret = trig->ops->set_trigger_state(trig, true);
  225. if (ret < 0)
  226. goto out_free_irq;
  227. }
  228. /*
  229. * Check if we just registered to our own trigger: we determine that
  230. * this is the case if the IIO device and the trigger device share the
  231. * same parent device.
  232. */
  233. if (pf->indio_dev->dev.parent == trig->dev.parent)
  234. trig->attached_own_device = true;
  235. return ret;
  236. out_free_irq:
  237. free_irq(pf->irq, pf);
  238. out_put_irq:
  239. iio_trigger_put_irq(trig, pf->irq);
  240. out_put_module:
  241. module_put(pf->indio_dev->info->driver_module);
  242. return ret;
  243. }
/*
 * iio_trigger_detach_poll_func() - detach a poll function from a trigger
 * @trig:	trigger the poll function is currently attached to
 * @pf:		poll function to detach
 *
 * Mirror of iio_trigger_attach_poll_func(): disables the trigger when
 * this is its last consumer, frees the irq and slot, and drops the
 * module reference taken at attach time.
 *
 * Return: 0 on success or the error from ops->set_trigger_state.
 */
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	/* exactly one bit set means we are the last remaining consumer */
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);

	if (trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	/* same parent-device test as in attach: clearing "own trigger" flag */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);

	return ret;
}
/*
 * Top-half handler that timestamps the trigger occurrence as early as
 * possible, then wakes the threaded (bottom-half) handler.
 */
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
  271. struct iio_poll_func
  272. *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
  273. irqreturn_t (*thread)(int irq, void *p),
  274. int type,
  275. struct iio_dev *indio_dev,
  276. const char *fmt,
  277. ...)
  278. {
  279. va_list vargs;
  280. struct iio_poll_func *pf;
  281. pf = kmalloc(sizeof *pf, GFP_KERNEL);
  282. if (pf == NULL)
  283. return NULL;
  284. va_start(vargs, fmt);
  285. pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
  286. va_end(vargs);
  287. if (pf->name == NULL) {
  288. kfree(pf);
  289. return NULL;
  290. }
  291. pf->h = h;
  292. pf->thread = thread;
  293. pf->type = type;
  294. pf->indio_dev = indio_dev;
  295. return pf;
  296. }
  297. EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
/* Free a poll function allocated with iio_alloc_pollfunc() */
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	/* name is owned by pf, so free it before pf itself */
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
  304. /**
  305. * iio_trigger_read_current() - trigger consumer sysfs query current trigger
  306. * @dev: device associated with an industrial I/O device
  307. * @attr: pointer to the device_attribute structure that
  308. * is being processed
  309. * @buf: buffer where the current trigger name will be printed into
  310. *
  311. * For trigger consumers the current_trigger interface allows the trigger
  312. * used by the device to be queried.
  313. *
  314. * Return: a negative number on failure, the number of characters written
  315. * on success or 0 if no trigger is available
  316. */
  317. static ssize_t iio_trigger_read_current(struct device *dev,
  318. struct device_attribute *attr,
  319. char *buf)
  320. {
  321. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  322. if (indio_dev->trig)
  323. return sprintf(buf, "%s\n", indio_dev->trig->name);
  324. return 0;
  325. }
/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 * on success
 */
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	/* Refuse while buffered capture runs or the trigger is pinned */
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (indio_dev->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);
	/*
	 * NOTE(review): mlock is dropped before the trigger swap below, so a
	 * mode change could in principle race with it - verify callers.
	 */

	/* Takes a reference on the matched trigger; NULL clears the trigger */
	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		/* No change: report success without touching refcounts twice */
		ret = len;
		goto out_trigger_put;
	}

	/* Both the device and the trigger may veto the pairing */
	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	/* The reference from acquire_by_name() is kept in indio_dev->trig */
	indio_dev->trig = trig;

	/* For event-driven devices, move the event pollfunc between triggers */
	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}
/* current_trigger: readable/writable by owner, exposed under "trigger/" */
static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

/* Attribute group added to every trigger-consumer IIO device */
static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
/*
 * Device release callback: runs when the last reference to the trigger's
 * embedded device is dropped.  Tears down the consumer sub-irq descriptors
 * (reversing the setup done in viio_trigger_alloc()) and frees the trigger.
 */
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};
  427. static void iio_trig_subirqmask(struct irq_data *d)
  428. {
  429. struct irq_chip *chip = irq_data_get_irq_chip(d);
  430. struct iio_trigger *trig
  431. = container_of(chip,
  432. struct iio_trigger, subirq_chip);
  433. trig->subirqs[d->irq - trig->subirq_base].enabled = false;
  434. }
  435. static void iio_trig_subirqunmask(struct irq_data *d)
  436. {
  437. struct irq_chip *chip = irq_data_get_irq_chip(d);
  438. struct iio_trigger *trig
  439. = container_of(chip,
  440. struct iio_trigger, subirq_chip);
  441. trig->subirqs[d->irq - trig->subirq_base].enabled = true;
  442. }
/*
 * viio_trigger_alloc() - allocate and initialise a trigger (va_list form)
 * @fmt:	printf-style name format
 * @vargs:	arguments for @fmt
 *
 * Sets up the embedded device, the per-trigger irq_chip and one irq
 * descriptor per possible consumer.  Returns the trigger with one extra
 * device reference held (see get_device() below), or NULL on failure.
 */
static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);

	mutex_init(&trig->pool_lock);
	/* One irq descriptor per possible consumer of this trigger */
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		/* consumers get irqs via the pool, not via request_irq() */
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}
	get_device(&trig->dev);

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}
/**
 * iio_trigger_alloc() - allocate a trigger with a printf-style name
 * @fmt:	trigger name format string
 *
 * Varargs wrapper around viio_trigger_alloc().  Free with
 * iio_trigger_free().  Returns NULL on failure.
 */
struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

/**
 * iio_trigger_free() - drop the allocation reference on a trigger
 * @trig:	trigger to free (NULL is a no-op)
 *
 * The actual memory is released by iio_trig_release() when the last
 * device reference is gone.
 */
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
  495. static void devm_iio_trigger_release(struct device *dev, void *res)
  496. {
  497. iio_trigger_free(*(struct iio_trigger **)res);
  498. }
  499. static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
  500. {
  501. struct iio_trigger **r = res;
  502. if (!r || !*r) {
  503. WARN_ON(!r || !*r);
  504. return 0;
  505. }
  506. return *r == data;
  507. }
/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		/* tie the trigger's lifetime to @dev via devres */
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_trig:	the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_release,
			    devm_iio_trigger_match, iio_trig);
	/* non-zero means the trigger was not managed by @dev - a caller bug */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

/* devres release callback: unregisters the managed trigger on detach */
static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
	iio_trigger_unregister(*(struct iio_trigger **)res);
}
/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_trigger registered with this function needs to be unregistered
 * separately, devm_iio_trigger_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
{
	struct iio_trigger **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = trig_info;
	ret = iio_trigger_register(trig_info);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
  598. /**
  599. * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
  600. * @dev: device this iio_trigger belongs to
  601. * @trig_info: the trigger associated with the device
  602. *
  603. * Unregister trigger registered with devm_iio_trigger_register().
  604. */
  605. void devm_iio_trigger_unregister(struct device *dev,
  606. struct iio_trigger *trig_info)
  607. {
  608. int rc;
  609. rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
  610. trig_info);
  611. WARN_ON(rc);
  612. }
  613. EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
/*
 * Report whether the device's current trigger was provided by the device
 * itself (flag set in iio_trigger_attach_poll_func()).
 * NOTE(review): dereferences indio_dev->trig unconditionally - presumably
 * callers only use this while a trigger is attached; verify.
 */
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);
  619. /**
  620. * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
  621. * the same device
  622. * @trig: The IIO trigger to check
  623. * @indio_dev: the IIO device to check
  624. *
  625. * This function can be used as the validate_device callback for triggers that
  626. * can only be attached to their own device.
  627. *
  628. * Return: 0 if both the trigger and the IIO device belong to the same
  629. * device, -EINVAL otherwise.
  630. */
  631. int iio_trigger_validate_own_device(struct iio_trigger *trig,
  632. struct iio_dev *indio_dev)
  633. {
  634. if (indio_dev->dev.parent != trig->dev.parent)
  635. return -EINVAL;
  636. return 0;
  637. }
  638. EXPORT_SYMBOL(iio_trigger_validate_own_device);
/* Expose the "trigger/current_trigger" sysfs group on a consumer device */
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}
/* Buffer postenable: attach the device's pollfunc to its current trigger */
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

/* Buffer predisable: detach the pollfunc again (mirror of postenable) */
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);