intel_breadcrumbs.c

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

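/* Fallback for a missed user interrupt: if the current wait has outlived
 * the hangcheck interval without the IRQ firing, mark the engine as
 * missing interrupts, switch to polling via the fake-irq timer and kick
 * the GPU hangcheck.
 */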
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_enabled)
                return;

        if (time_before(jiffies, b->timeout)) {
                mod_timer(&b->hangcheck, b->timeout);
                return;
        }

        DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a gpu hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}

static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the worker will wake up
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        engine->breadcrumbs.irq_posted = true;

        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);

        engine->breadcrumbs.irq_posted = false;
}

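/* Enable the user interrupt on behalf of the waiters (caller must hold
 * b->lock). Takes a runtime-pm wakeref to cover the unmasked interrupt;
 * if the interrupt cannot be relied upon (disabled, masked for testing,
 * or previously seen to go missing), the fake-irq poll timer is armed
 * instead of the hangcheck timer.
 */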
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->rpm_wakelock)
                return;

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. For completeness,
         * record an rpm reference for ourselves to cover the
         * interrupt we unmask.
         */
        intel_runtime_pm_get_noresume(i915);
        b->rpm_wakelock = true;

        /* No interrupts? Kick the waiter every jiffie! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        if (!b->irq_enabled ||
            test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
                mod_timer(&b->fake_irq, jiffies + 1);
        } else {
                /* Ensure we never sleep indefinitely */
                GEM_BUG_ON(!time_after(b->timeout, jiffies));
                mod_timer(&b->hangcheck, b->timeout);
        }
}

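/* Counterpart of __intel_breadcrumbs_enable_irq(): mask the user interrupt
 * again and release the runtime-pm wakeref taken on its behalf (caller
 * must hold b->lock).
 */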
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        assert_spin_locked(&b->lock);
        if (!b->rpm_wakelock)
                return;

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        intel_runtime_pm_put(engine->i915);
        b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches; since we hold
         * the spinlock we know that the first_waiter must be delayed, and
         * we can reduce some of the sequential wake up latency if we take
         * action ourselves and wake up the completed tasks in parallel.
         * Also, by removing stale elements in the tree, we may be able to
         * reduce the ping-pong between the old bottom-half and ourselves
         * as first-waiter.
         */
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
        GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so we need to wake up the next bottom-half.
                         *
                         * Also as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (READ_ONCE(b->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->timeout = wait_timeout();
                b->first_wait = wait;
                rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}

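/* Add @wait to the engine's retirement-ordered tree of waiters. Returns
 * true if the caller should perform the coherent seqno check itself,
 * i.e. it is now the oldest waiter (the bottom-half) or its seqno had
 * already passed.
 */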
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock(&b->lock);

        return first;
}

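/* Chain-wake the next completed waiter only if it is at least as important
 * (no larger task->prio) as the departing waiter, so that a high priority
 * task never spends its time waking a herd of low priority clients. The
 * signaler uses INT_MIN and therefore never wakes a chain.
 */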
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}

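/* Remove @wait from the engine's tree of waiters. If it was acting as the
 * bottom-half, hand that role over to the next oldest waiter (waking any
 * already-completed waiters along the way), or disable the interrupt once
 * the tree is empty.
 */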
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out_unlock;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                        rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out_unlock:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is already completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return true;

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

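/* Run the signaler as a low-priority SCHED_FIFO realtime thread so that
 * fence signalling is not starved by ordinary timeshare scheduling.
 */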
static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

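/* Per-engine signaler kthread: woken by the user interrupt bottom-half or
 * by a client installing a new signal, it performs the coherent seqno
 * check and signals completed fences in retirement order.
 */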
static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                request = READ_ONCE(b->first_signal);
                if (signal_complete(request)) {
                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);

                        local_bh_disable();
                        fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        spin_lock(&b->lock);
                        if (request == b->first_signal) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                b->first_signal = rb ? to_signaler(rb) : NULL;
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        spin_unlock(&b->lock);

                        i915_gem_request_put(request);
                } else {
                        if (kthread_should_stop())
                                break;

                        schedule();
                }
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;

        /* locked by fence_enable_sw_signaling() */
        assert_spin_locked(&request->lock);

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.seqno = request->fence.seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(request->fence.seqno,
                                      to_signaler(parent)->fence.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                smp_store_mb(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}

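/* Initialise the per-engine breadcrumb state: the waiter lock, the
 * fake-irq and hangcheck fallback timers, and the signaler kthread.
 */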
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

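/* Re-sanitise the interrupt state: cancel the fallback timers, then either
 * re-enable the user interrupt (waking the first waiter so it performs a
 * coherent seqno check) if anyone is still waiting, or mask it entirely.
 */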
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock(&b->lock);

        __intel_breadcrumbs_disable_irq(b);
        if (intel_engine_has_waiter(engine)) {
                b->timeout = wait_timeout();
                __intel_breadcrumbs_enable_irq(b);
                if (READ_ONCE(b->irq_posted))
                        wake_up_process(b->first_wait->tsk);
        } else {
                /* sanitize the IMR and unmask any auxiliary interrupts */
                irq_disable(engine);
        }

        spin_unlock(&b->lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}

unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        /* To avoid the task_struct disappearing beneath us as we wake up
         * the process, we must first inspect the task_struct->state under the
         * RCU lock, i.e. as we call wake_up_process() we must be holding the
         * rcu_read_lock().
         */
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);

        return mask;
}

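/* Wake every engine's signaler thread so it re-runs the coherent seqno
 * check on its oldest pending signal; returns a mask of the engines that
 * had signals outstanding.
 */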
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        for_each_engine(engine, i915) {
                if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
                        wake_up_process(engine->breadcrumbs.signaler);
                        mask |= intel_engine_flag(engine);
                }
        }

        return mask;
}