/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
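
/*
 * Example (illustrative sketch, not part of the original file): an irqchip
 * driver typically installs its chip right after allocating or mapping an
 * irq number. "foo_chip", "foo_mask" and "foo_unmask" are hypothetical names.
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "FOO",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	if (irq_set_chip(irq, &foo_chip))
 *		pr_err("foo: could not set chip for irq %u\n", irq);
 */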

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
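
/*
 * Example (illustrative sketch only): a board or platform setup path could
 * configure a line as falling-edge triggered before a driver requests it.
 * The irq number here is hypothetical.
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING))
 *		pr_warn("could not set trigger type for irq %u\n", irq);
 *
 * Most drivers do not call this directly; passing IRQF_TRIGGER_* flags to
 * request_irq() typically ends up in the same __irq_set_trigger() path.
 */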

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
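
/*
 * Example (illustrative sketch only): a cascaded controller driver commonly
 * stores its per-instance state as chip data and fetches it back in its
 * irq_chip callbacks. "struct foo_gc", its fields and FOO_MASK_SET are
 * hypothetical.
 *
 *	struct foo_gc {
 *		void __iomem	*base;
 *		unsigned int	irq_base;
 *	};
 *
 *	irq_set_chip_data(irq, gc);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_gc *gc = irq_data_get_irq_chip_data(d);
 *
 *		writel(1 << (d->irq - gc->irq_base), gc->base + FOO_MASK_SET);
 *	}
 */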

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
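
/*
 * Example (illustrative sketch only): an I2C/SPI expander driver whose
 * parent interrupt runs in a threaded handler can demultiplex its child
 * interrupts with handle_nested_irq(). All "foo_*" names are hypothetical;
 * the child irqs are assumed to have been marked with
 * irq_set_nested_thread(child_irq, 1).
 *
 *	static irqreturn_t foo_parent_thread(int irq, void *data)
 *	{
 *		struct foo_chip *foo = data;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, foo->nr_irqs)
 *			handle_nested_irq(foo->irq_base + bit);
 *
 *		return IRQ_HANDLED;
 *	}
 */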

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
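
/*
 * Example (illustrative sketch only): a GPIO expander driver might register
 * its child interrupts with handle_simple_irq and decode them from a chained
 * handler on the parent line. All "foo_*" names are hypothetical.
 *
 *	irq_set_chip_and_handler(child_irq, &foo_chip, handle_simple_irq);
 *
 *	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct foo_chip *foo = irq_get_handler_data(irq);
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, foo->nr_irqs)
 *			generic_handle_irq(foo->irq_base + bit);
 *	}
 *
 *	irq_set_handler_data(parent_irq, foo);
 *	irq_set_chained_handler(parent_irq, foo_demux_handler);
 */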

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it again after the associated handler has acknowledged the device, so
 * the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
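
/*
 * Example (illustrative sketch only): an irqchip or platform setup path
 * usually pairs a chip with the flow handler matching the line's trigger
 * behaviour; "foo_chip" and the irq number are hypothetical.
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
 *
 * An edge-latching controller would use handle_edge_irq instead, and an
 * EOI-driven controller handle_fasteoi_irq.
 */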

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
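
/*
 * Example (illustrative sketch only): a controller that supports both edge
 * and level modes can switch the flow handler from its irq_set_type()
 * callback. This assumes the __irq_set_handler_locked() helper of this
 * kernel vintage, which is meant for exactly this context where the
 * descriptor lock is already held; "foo_set_type" and "foo_hw_set_type"
 * are hypothetical.
 *
 *	static int foo_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		foo_hw_set_type(d, type);
 *
 *		if (type & IRQ_TYPE_EDGE_BOTH)
 *			__irq_set_handler_locked(d->irq, handle_edge_irq);
 *		else
 *			__irq_set_handler_locked(d->irq, handle_level_irq);
 *		return 0;
 *	}
 */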

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
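
/*
 * Example (illustrative sketch only): a per-CPU line such as a local timer
 * might be wired up as
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_percpu_irq);
 *
 * Note that this flow handler takes no descriptor lock, so the chip
 * callbacks and the action handler must be safe against concurrent
 * invocation on different CPUs.
 */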

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
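
/*
 * Example (illustrative sketch only): setting chip, flow handler and a name
 * in one go; the name appears next to the chip name in /proc/interrupts.
 * "foo_chip" and "foo-edge" are hypothetical.
 *
 *	irq_set_chip_and_handler_name(irq, &foo_chip, handle_edge_irq,
 *				      "foo-edge");
 *
 * The shorter irq_set_chip_and_handler() wrapper passes a NULL name.
 */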

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return;

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
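
/*
 * Example (illustrative sketch only): drivers usually reach this through the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers, e.g. to exclude
 * a line from irq balancing and autoprobing:
 *
 *	irq_set_status_flags(irq, IRQ_NO_BALANCING | IRQ_NOPROBE);
 *
 * which is equivalent to
 * irq_modify_status(irq, 0, IRQ_NO_BALANCING | IRQ_NOPROBE).
 */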

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}