/* ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"

/* Define the max number of pulse/space transitions to buffer */
#define MAX_IR_EVENT_SIZE	512

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;

#ifdef MODULE
/* Used to load the decoders */
static struct work_struct wq_load;
#endif

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));

		if (!retval) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		spin_unlock_irq(&raw->lock);

		BUG_ON(retval != sizeof(ev));

		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);

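/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * measures mark/space durations in hardware could feed each sample to the
 * decoders roughly as below. The example_* name is hypothetical and the
 * duration is an arbitrary value in nanoseconds.
 */
#if 0
static void example_push_sample(struct rc_dev *rc, bool pulse, u32 duration_ns)
{
	DEFINE_IR_RAW_EVENT(ev);

	ev.pulse = pulse;		/* true = mark, false = space */
	ev.duration = duration_ns;	/* e.g. 560000 for a 560us NEC mark */

	if (ir_raw_event_store(rc, &ev))
		pr_debug("example: IR sample dropped (fifo full or no raw mode)\n");
}
#endif
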
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev: the struct rc_dev device descriptor
 * @type: the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t now;
	s64 delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;
	int delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since the last event or if we're
	 * being called for the first time; note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);

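/*
 * Illustrative usage sketch (not part of the original file): hardware that
 * only raises an interrupt on each level change, with no duration
 * measurement, can report which state the receive line has just entered and
 * let rc-core compute the elapsed time. The example_* names are hypothetical
 * and <linux/interrupt.h> is assumed for the IRQ types.
 */
#if 0
static irqreturn_t example_edge_irq(int irq, void *data)
{
	struct rc_dev *rc = data;
	bool active = example_read_rx_line();	/* hypothetical line-level read */

	ir_raw_event_store_edge(rc, active ? IR_PULSE : IR_SPACE);
	return IRQ_HANDLED;
}
#endif
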
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffers.
 * It automerges samples of the same type, and handles timeouts.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);

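/*
 * Illustrative usage sketch (not part of the original file): a driver
 * draining a small hardware FIFO can forward each raw entry through the
 * filter, which merges consecutive samples of the same polarity and enters
 * idle mode once the accumulated space exceeds dev->timeout. The example_*
 * name and the packed sample layout are hypothetical.
 */
#if 0
static void example_drain_fifo(struct rc_dev *rc, const u32 *samples, int count)
{
	DEFINE_IR_RAW_EVENT(ev);
	int i;

	for (i = 0; i < count; i++) {
		ev.pulse = !!(samples[i] & BIT(31));	/* hypothetical mark flag */
		ev.duration = samples[i] & ~BIT(31);	/* duration in ns */
		ir_raw_event_store_with_filter(rc, &ev);
	}
	ir_raw_event_handle(rc);	/* wake the decoder thread once */
}
#endif
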
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev: the struct rc_dev device descriptor
 * @idle: whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

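/*
 * Illustrative usage sketch (not part of the original file): a driver whose
 * hardware can itself detect that the receive line has gone quiet may report
 * idleness directly instead of relying on the timeout check above. The
 * example_* name is hypothetical.
 */
#if 0
static void example_rx_idle(struct rc_dev *rc)
{
	/* Flushes the pending sample with the timeout flag set and, if the
	 * driver provided s_idle, lets the hardware stop sampling. */
	ir_raw_event_set_idle(rc, true);
	ir_raw_event_handle(rc);
}
#endif
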
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev: the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	unsigned long flags;

	if (!dev->raw)
		return;

	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

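/*
 * Illustrative usage sketch (not part of the original file): the common
 * pattern is to queue a burst of samples from the interrupt handler and wake
 * the decoding thread once at the end rather than after every sample. The
 * example_* names are hypothetical and <linux/interrupt.h> is assumed.
 */
#if 0
static irqreturn_t example_burst_irq(int irq, void *data)
{
	struct rc_dev *rc = data;
	DEFINE_IR_RAW_EVENT(ev);
	bool pulse;
	u32 duration;
	int i;

	for (i = 0; i < example_num_samples(); i++) {
		example_read_sample(i, &pulse, &duration);	/* hypothetical */
		ev.pulse = pulse;
		ev.duration = duration;
		ir_raw_event_store(rc, &ev);
	}
	ir_raw_event_handle(rc);	/* a single wakeup for the whole burst */
	return IRQ_HANDLED;
}
#endif
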
/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	u64 protocols;
	mutex_lock(&ir_raw_handler_lock);
	protocols = available_protocols;
	mutex_unlock(&ir_raw_handler_lock);
	return protocols;
}

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->raw->enabled_protocols = ~0;
	rc = kfifo_alloc(&dev->raw->kfifo,
			 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
			 GFP_KERNEL);
	if (rc < 0)
		goto out;

	spin_lock_init(&dev->raw->lock);
	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
				       "rc%ld", dev->devno);

	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		kfifo_free(&dev->raw->kfifo);	/* don't leak the fifo allocated above */
		goto out;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}

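/*
 * Illustrative usage sketch (not part of the original file): these two
 * functions are normally invoked from rc-core's own device registration and
 * removal paths rather than by individual drivers; the expected pairing is
 * shown below with hypothetical example_* names.
 */
#if 0
static int example_attach(struct rc_dev *example_rc)
{
	int ret = ir_raw_event_register(example_rc);

	if (ret)
		return ret;	/* dev->raw stays NULL on failure */
	/* ... the device is now visible to all registered decoders ... */
	return 0;
}

static void example_detach(struct rc_dev *example_rc)
{
	ir_raw_event_unregister(example_rc);	/* stops the thread, frees dev->raw */
}
#endif
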
/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	available_protocols |= ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	if (ir_raw_handler->raw_unregister)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_unregister(raw->dev);
	available_protocols &= ~ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);

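/*
 * Illustrative usage sketch (not part of the original file): a protocol
 * decoder module registers itself through this interface so that the
 * decoding thread above feeds it every stored sample. The example_* names
 * and the EXAMPLE_PROTOCOL_BIT constant are hypothetical; real decoders
 * such as ir-nec-decoder.c follow this same shape.
 */
#if 0
static int example_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	/* Run a per-protocol state machine on ev.pulse / ev.duration and
	 * report a scancode to rc-core when a full frame has been seen. */
	return 0;
}

static struct ir_raw_handler example_handler = {
	.protocols	= EXAMPLE_PROTOCOL_BIT,	/* hypothetical protocol bit */
	.decode		= example_decode,
};

static int __init example_decode_init(void)
{
	return ir_raw_handler_register(&example_handler);
}

static void __exit example_decode_exit(void)
{
	ir_raw_handler_unregister(&example_handler);
}

module_init(example_decode_init);
module_exit(example_decode_exit);
#endif
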
#ifdef MODULE
static void init_decoders(struct work_struct *work)
{
	/* Load the decoder modules */

	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_sanyo_decode();
	load_mce_kbd_decode();
	load_lirc_codec();

	/* If needed, init code may be added here later. In that case, the
	 * CONFIG_MODULE test in rc-core.h will also need to be changed.
	 */
}
#endif

void ir_raw_init(void)
{
#ifdef MODULE
	INIT_WORK(&wq_load, init_decoders);
	schedule_work(&wq_load);
#endif
}