manage.c

/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
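
/*
 * Illustrative sketch (not part of the original file): a typical quiesce
 * sequence in a driver teardown path. The device "foo" and its helpers
 * are hypothetical; the point is that interrupt generation is stopped at
 * the device *before* synchronize_irq() waits for in-flight handlers.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		foo_mask_device_interrupts(foo);  // stop new IRQs at the source
 *		synchronize_irq(foo->irq);        // wait for running handlers
 *		foo_release_resources(foo);       // now safe to tear down
 *	}
 */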
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
        return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
        return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_can_move_pcntxt(data)) {
                ret = chip->irq_set_affinity(data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(data->affinity, mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return ret;
}
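
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): pinning an interrupt to a single CPU. cpumask_of() and
 * irq_set_affinity() are real kernel APIs; the surrounding driver
 * context is assumed.
 *
 *	int err = irq_set_affinity(irq, cpumask_of(2));	// route to CPU 2
 *	if (err)
 *		pr_warn("could not set affinity for irq %u\n", irq);
 *
 * Note that for non-balanceable interrupts (per-CPU, NO_BALANCING) or a
 * chip without an irq_set_affinity callback this fails with -EINVAL.
 */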
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 * notification. Function pointers must be initialised;
 * the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
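
/*
 * Illustrative sketch (hypothetical driver state, not part of the
 * original file): wiring up an affinity notifier. The caller supplies
 * the notify/release callbacks; irq_set_affinity_notifier() above
 * completes the rest of the initialisation.
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		// re-steer per-CPU resources to follow the new mask
 *	}
 *
 *	static void foo_notify_release(struct kref *ref)
 *	{
 *		// last reference dropped; runs in process context
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_changed;
 *	foo->affinity_notify.release = foo_notify_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 */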
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->istate |= IRQS_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq, false);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
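
/*
 * Illustrative sketch (hypothetical device, not part of the original
 * file): the usual nested pairing. Each disable_irq() must be matched
 * by exactly one enable_irq().
 *
 *	disable_irq(foo->irq);		// waits for running handlers
 *	foo_reprogram_hardware(foo);	// handler cannot run concurrently
 *	enable_irq(foo->irq);
 *
 * From a context that may not sleep, disable_irq_nosync() can be used
 * instead, at the cost of not waiting for handlers already in flight.
 */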
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->istate & IRQS_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->istate &= ~IRQS_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq, false);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        /*
         * Wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
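
/*
 * Illustrative sketch (hypothetical suspend/resume hooks, not part of
 * the original file): wake enables and disables must balance, just like
 * disable_irq()/enable_irq().
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);	// arm as wake source
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);	// disarm again
 *		return 0;
 *	}
 */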
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
        int canrequest = 0;

        if (!desc)
                return 0;

        if (irq_settings_can_request(desc)) {
                if (desc->action)
                        if (irqflags & desc->action->flags & IRQF_SHARED)
                                canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!irqd_irq_masked(&desc->irq_data))
                        mask_irq(desc);
                if (!irqd_irq_disabled(&desc->irq_data))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }
                ret = 0;
                break;
        default:
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}
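
/*
 * Illustrative note (not part of the original file): drivers normally do
 * not call __irq_set_trigger() directly; the trigger flags passed to
 * request_irq() reach it via __setup_irq(). A hypothetical edge-triggered
 * GPIO button, for example:
 *
 *	ret = request_irq(button_irq, button_isr,
 *			  IRQF_TRIGGER_FALLING, "foo-button", foo);
 */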
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be, we need to protect against
         * the following scenario:
         *
         * The thread is done faster than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and mask the line, then leave
         * due to IRQS_INPROGRESS, and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread(). See the comment there which explains the
         * serialization.
         */
        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
                unmask_irq(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}
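
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): a oneshot threaded interrupt for a slow (e.g. I2C) device.
 * With handler == NULL, irq_default_primary_handler() above is installed
 * and the line stays masked until foo_thread_fn() returns, at which
 * point irq_finalize_oneshot() unmasks it.
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */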
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        irqreturn_t ret;

        local_bh_disable();
        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
        return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
                                 struct irqaction *action)
{
        irqreturn_t ret;

        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        return ret;
}
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                                  struct irqaction *action);
        int wake;

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        irqreturn_t action_ret;

                        raw_spin_unlock_irq(&desc->lock);
                        action_ret = handler_fn(desc, action);
                        if (!noirqdebug)
                                note_interrupt(action->irq, desc, action_ret);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would complain
         * about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        desc = irq_to_desc(tsk->irqaction->irq);

        /*
         * Prevent a stale desc->threads_oneshot. Must be called
         * before setting the IRQTF_DIED flag.
         */
        irq_finalize_oneshot(desc, tsk->irqaction, true);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler. Note: IRQTF_DIED is a
         * thread_flags bit (tested via thread_flags everywhere else in
         * this file), so it must be set there, not in ->flags.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;
        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}
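
/*
 * Illustrative note (not part of the original file): with "threadirqs"
 * on the kernel command line (see setup_forced_irqthreads() at the top),
 * a plain request_irq() handler that is not marked IRQF_NO_THREAD is
 * transparently promoted by this function.
 *
 *	// before: handler runs in hard irq context
 *	request_irq(irq, foo_isr, 0, "foo", foo);
 *	// after boot with "threadirqs": foo_isr effectively becomes the
 *	// thread_fn of a forced-threaded, implicitly ONESHOT interrupt,
 *	// and irq_default_primary_handler() runs in hard irq context
 */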
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a
                 * problem? Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                if (irq_settings_can_thread(desc))
                        irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        /*
                         * Or all existing action->thread_mask bits,
                         * so we can find the next zero bit for this
                         * new action.
                         */
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction for ONESHOT. For
         * !ONESHOT irqs the thread mask is 0 so we can avoid a
         * conditional in irq_wake_thread().
         */
        if (new->flags & IRQF_ONESHOT) {
                /*
                 * Unlikely to have 32 (or 64, on 64-bit) irqs sharing
                 * one line, but who knows.
                 */
                if (thread_mask == ~0UL) {
                        ret = -EBUSY;
                        goto out_mask;
                }
                /*
                 * The thread_mask for the action is or'ed to
                 * desc->threads_oneshot to indicate that the
                 * IRQF_ONESHOT thread handler has been woken, but not
                 * yet finished. The bit is cleared when a thread
                 * completes. When all threads of a shared interrupt
                 * line have completed desc->threads_oneshot becomes
                 * zero and the interrupt line is unmasked. See
                 * handle.c:irq_wake_thread() for further information.
                 *
                 * If no thread is woken by primary (hard irq context)
                 * interrupt handlers, then desc->threads_active is
                 * also checked for zero to unmask the irq line in the
                 * affected hard irq flow handlers
                 * (handle_[fasteoi|level]_irq).
                 *
                 * The new action gets the first zero bit of
                 * thread_mask assigned. See the loop above which or's
                 * all existing action->thread_mask bits.
                 */
                new->thread_mask = 1 << ffz(thread_mask);
        }
        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge, polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                                new->flags & IRQF_TRIGGER_MASK);
                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc, true);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        free_cpumask_var(mask);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now that it's being freed, so let's make
         * sure that is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that
         *   a 'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
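
/*
 * Illustrative sketch (hypothetical shared-IRQ driver, not part of the
 * original file): the interrupt is quiesced at the device before
 * free_irq(), since on a shared line the other handlers keep running
 * and ours may still be invoked until it is removed.
 *
 *	foo_mask_device_interrupts(foo);	// stop our device's IRQs
 *	free_irq(foo->irq, foo);		// dev_id must match request
 *	foo_release_resources(foo);
 */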
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * Primary handler for threaded interrupts.
 * If NULL and thread_fn != NULL the default
 * primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 * If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * IRQF_SHARED Interrupt is shared
 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
 * IRQF_TRIGGER_* Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
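
/*
 * Illustrative sketch (all foo_* names hypothetical, not part of the
 * original file): the split primary/threaded pattern described above.
 * The primary handler checks and quiets the device in hard irq context;
 * the heavy lifting happens in the thread.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))
 *			return IRQ_NONE;	// shared line, not our device
 *		foo_mask_device_interrupts(foo);
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_interrupts(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */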
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
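
/*
 * Illustrative sketch (not part of the original file): the return value
 * distinguishes how the handler will run, which matters if the caller
 * needs to know whether it may sleep. Names other than the API itself
 * are hypothetical.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */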