/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/*
 * RCU_TRACE() compiles its statement argument in only when CONFIG_RCU_TRACE
 * is set, so all tracing/statistics code vanishes from non-tracing builds.
 */
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Global control variables for the rcupdate callback mechanism.
 * The callback list is singly linked through rcu_head->next; the two
 * tail fields point at ->next pointers *within* that list (or at
 * ->rcucblist itself when the corresponding sublist is empty).
 */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs (tracing only). */
};
/*
 * Definitions for the rcupdate control blocks.  Both tail pointers start
 * out aimed at the (NULL) list header, denoting an empty callback list.
 */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail = &rcu_sched_ctrlblk.rcucblist,
	.curtail = &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail = &rcu_bh_ctrlblk.rcucblist,
	.curtail = &rcu_bh_ctrlblk.rcucblist,
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Nonzero once the scheduler is running; consulted by synchronize_rcu()
 * below.  NOTE(review): presumably set during boot elsewhere -- the
 * assignment is not visible in this file.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while an */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;	/* Grace periods started (tracing). */
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
/*
 * The single (this is a UP-only implementation) preemptible-RCU control
 * block.  All three callback tail pointers start out denoting an empty
 * list, and ->blkd_tasks starts out empty.
 */
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

/* Forward declarations for the expedited-grace-period machinery below. */
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
  127. /*
  128. * Return true if the CPU has not yet responded to the current grace period.
  129. */
  130. static int rcu_cpu_blocking_cur_gp(void)
  131. {
  132. return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
  133. }
  134. /*
  135. * Check for a running RCU reader. Because there is only one CPU,
  136. * there can be but one running RCU reader at a time. ;-)
  137. */
  138. static int rcu_preempt_running_reader(void)
  139. {
  140. return current->rcu_read_lock_nesting;
  141. }
  142. /*
  143. * Check for preempted RCU readers blocking any grace period.
  144. * If the caller needs a reliable answer, it must disable hard irqs.
  145. */
  146. static int rcu_preempt_blocked_readers_any(void)
  147. {
  148. return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
  149. }
  150. /*
  151. * Check for preempted RCU readers blocking the current grace period.
  152. * If the caller needs a reliable answer, it must disable hard irqs.
  153. */
  154. static int rcu_preempt_blocked_readers_cgp(void)
  155. {
  156. return rcu_preempt_ctrlblk.gp_tasks != NULL;
  157. }
  158. /*
  159. * Return true if another preemptible-RCU grace period is needed.
  160. */
  161. static int rcu_preempt_needs_another_gp(void)
  162. {
  163. return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
  164. }
  165. /*
  166. * Return true if a preemptible-RCU grace period is in progress.
  167. * The caller must disable hardirqs.
  168. */
  169. static int rcu_preempt_gp_in_progress(void)
  170. {
  171. return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
  172. }
  173. /*
  174. * Advance a ->blkd_tasks-list pointer to the next entry, instead
  175. * returning NULL if at the end of the list.
  176. */
  177. static struct list_head *rcu_next_node_entry(struct task_struct *t)
  178. {
  179. struct list_head *np;
  180. np = t->rcu_node_entry.next;
  181. if (np == &rcu_preempt_ctrlblk.blkd_tasks)
  182. np = NULL;
  183. return np;
  184. }
#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU into the debugfs
 * seq_file.  The tasks=%c%c%c flags show 'T' if any tasks are blocked,
 * 'N' if some block the normal GP, and 'E' if some block an expedited GP
 * ('.' otherwise); the "x."[cond] idiom indexes a two-character string.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	/* Boost counters, plus the low 16 bits of jiffies vs. ->boost_time. */
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   " ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	/* Reasons boosting was declined (see n_balk_* field comments). */
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   " balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#endif /* #ifdef CONFIG_RCU_TRACE */
  222. #ifdef CONFIG_RCU_BOOST
  223. #include "rtmutex_common.h"
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 *
 * Returns nonzero if more tasks remain in need of boosting once this
 * one has been dealt with, zero otherwise.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;	/* On-stack proxy lock; see comment below. */
	struct task_struct *t;
	struct list_head *tb;

	/* Cheap lockless check first. */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);		/* Blocks until t releases the proxy. */
	rt_mutex_unlock(&mtx);		/* Keep lockdep happy. */

	return rcu_preempt_ctrlblk.boost_tasks != NULL ||
	       rcu_preempt_ctrlblk.exp_tasks != NULL;
}
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	/* No normal-GP blockers and no expedited GP: nothing to boost. */
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	/*
	 * Boost for an expedited GP unconditionally; boost for a normal
	 * GP only if one is not already underway (->boost_tasks == NULL)
	 * and the RCU_BOOST_DELAY grace time has elapsed.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_kthread();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());	/* Record why not. */
	return 1;
}
/* CONFIG_RCU_BOOST_DELAY is in milliseconds; convert to jiffies, rounding up. */
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period:
 * boosting may begin once ->boost_time has passed.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't boost.
 */
static int rcu_boost(void)
{
	return 0;
}

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* else #ifdef CONFIG_RCU_BOOST */
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do.  */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks: the just-ended GP's CBs become "done". */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_kthread();
}
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise
 * before the element referenced by ->gp_tasks (or at the tail if
 * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * NOTE(review): the code below always list_add()s at the head and then
 * (if the CPU is still blocking the current GP) points ->gp_tasks at
 * the new entry; it never inserts "before ->gp_tasks" as the paragraph
 * above describes -- confirm which of the two is intended.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
  458. /*
  459. * Tiny-preemptible RCU implementation for rcu_read_lock().
  460. * Just increment ->rcu_read_lock_nesting, shared state will be updated
  461. * if we block.
  462. */
  463. void __rcu_read_lock(void)
  464. {
  465. current->rcu_read_lock_nesting++;
  466. barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
  467. }
  468. EXPORT_SYMBOL_GPL(__rcu_read_lock);
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;		/* No readers blocked current GP on entry? */
	int empty_exp;		/* No readers blocked expedited GP on entry? */
	unsigned long flags;
	struct list_head *np;	/* Successor of t on ->blkd_tasks, if any. */
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted: release the proxy rt_mutex. */
	if (special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);
		t->rcu_boost_mutex = NULL;
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}
/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 *
 * The barrier()s pin the compiler's ordering of the critical section,
 * the nesting decrement, and the ->rcu_read_unlock_special load.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);	/* Unbalanced unlock? */
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	/* CPU quiescent state if no reader is blocking the current GP. */
	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	/* Kick the kthread if done callbacks await invocation. */
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_kthread();
	/*
	 * If the running reader is blocking the current GP, ask it to
	 * report a quiescent state at its outermost rcu_read_unlock().
	 */
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	/* If the whole list was drained, reset ->nexttail to "empty". */
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
/*
 * Process callbacks for preemptible RCU by delegating to the common
 * rcu_process_callbacks() with the preemptible-RCU control block.
 */
static void rcu_preempt_process_callbacks(void)
{
	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 * @head: structure embedded in the protected object.
 * @func: function to invoke on @head once the grace period has elapsed.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	/* Append to the "next" sublist: waits for a future grace period. */
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
 * Wait for all in-flight RCU callbacks: queue a callback of our own
 * and block until it runs, which (with a single callback list) implies
 * all previously queued callbacks have been invoked.
 */
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;		/* Too early in boot for a grace period. */
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	/* Calling from within a read-side critical section would deadlock. */
	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;		/* No blocked readers: GP already over. */

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
/* State for synchronize_rcu_expedited(): waitqueue for the blocked-reader
 * drain, a completed-expedited-GP counter, and a mutex serializing callers. */
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
  662. /*
  663. * Return non-zero if there are any tasks in RCU read-side critical
  664. * sections blocking the current preemptible-RCU expedited grace period.
  665. * If there is no preemptible-RCU expedited grace period currently in
  666. * progress, returns zero unconditionally.
  667. */
  668. static int rcu_preempted_readers_exp(void)
  669. {
  670. return rcu_preempt_ctrlblk.exp_tasks != NULL;
  671. }
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period: wake the synchronize_rcu_expedited() caller sleeping on
 * sync_rcu_preempt_exp_wq.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely in the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 * (The counter moving past our pre-lock snapshot means a full
	 * expedited GP began and ended while we waited.)
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;		/* List was empty. */

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();		/* Help the drain along. */
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  733. EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  734. /*
  735. * Does preemptible RCU need the CPU to stay out of dynticks mode?
  736. */
  737. int rcu_preempt_needs_cpu(void)
  738. {
  739. if (!rcu_preempt_running_reader())
  740. rcu_preempt_cpu_qs();
  741. return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
  742. }
  743. /*
  744. * Check for a task exiting while in a preemptible -RCU read-side
  745. * critical section, clean up if so. No need to issue warnings,
  746. * as debug_check_no_locks_held() already does this if lockdep
  747. * is enabled.
  748. */
  749. void exit_rcu(void)
  750. {
  751. struct task_struct *t = current;
  752. if (t->rcu_read_lock_nesting == 0)
  753. return;
  754. t->rcu_read_lock_nesting = 1;
  755. __rcu_read_unlock();
  756. }
  757. #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
  758. #ifdef CONFIG_RCU_TRACE
/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	/* Intentionally empty: no preemptible-RCU state to report. */
}
  766. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Because preemptible RCU does not exist, it is never necessary to
 * boost preempted RCU readers.
 */
static int rcu_boost(void)
{
	return 0; /* Never any readers to boost. */
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
	/* Intentionally empty. */
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	/* Intentionally empty. */
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
	/* Intentionally empty. */
}
  796. #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
  797. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  798. #include <linux/kernel_stat.h>
/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	/* The scheduler should not yet have performed a context switch. */
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
  808. #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  809. #ifdef CONFIG_RCU_BOOST
  810. #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
  811. #else /* #ifdef CONFIG_RCU_BOOST */
  812. #define RCU_BOOST_PRIO 1
  813. #endif /* #else #ifdef CONFIG_RCU_BOOST */
  814. #ifdef CONFIG_RCU_TRACE
  815. #ifdef CONFIG_RCU_BOOST
/*
 * Attribute a refusal to initiate RCU priority boosting ("balk") to the
 * first applicable cause, for tracing.  The tests must stay in this
 * order: each case is meaningful only if the earlier ones did not hold.
 */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++; /* No blocked tasks at all. */
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++; /* No task blocks a normal or expedited GP. */
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++; /* Boosting already under way. */
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++; /* Not yet time to boost. */
	else
		rcu_preempt_ctrlblk.n_balk_nos++; /* Balked for some other reason. */
}
  830. #endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Subtract n from rcp's callback-queue length.  Interrupts are disabled
 * so the read-modify-write of ->qlen cannot race with an irq handler
 * updating the same counter.
 */
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}
/*
 * Dump statistics for TINY_RCU, such as they are.  seq_file show
 * routine for the debugfs "rcudata" file; always succeeds.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m); /* Empty stub unless TINY_PREEMPT_RCU. */
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
/* debugfs open method: bind the single-shot seq_file show routine. */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
/* File operations for the debugfs "rcudata" statistics file. */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs "rcu" directory; created by rcutiny_trace_init(). */
static struct dentry *rcudir;
  860. static int __init rcutiny_trace_init(void)
  861. {
  862. struct dentry *retval;
  863. rcudir = debugfs_create_dir("rcu", NULL);
  864. if (!rcudir)
  865. goto free_out;
  866. retval = debugfs_create_file("rcudata", 0444, rcudir,
  867. NULL, &show_tiny_stats_fops);
  868. if (!retval)
  869. goto free_out;
  870. return 0;
  871. free_out:
  872. debugfs_remove_recursive(rcudir);
  873. return 1;
  874. }
/* Remove the debugfs "rcu" directory and everything beneath it. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}
/* Module entry/exit points and metadata for the RCU tracing module. */
module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");
  884. #endif /* #ifdef CONFIG_RCU_TRACE */