jump_label.c

/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
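
/*
 * Locking convention: the *_cpuslocked() variants below assume the caller
 * already holds cpus_read_lock(); the plain wrappers take and drop it
 * around the call themselves.
 */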
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update(). Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * Ensure that if the above cmpxchg loop observes our positive
                 * value, it must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
        cpus_read_lock();
        static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
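
/*
 * A minimal usage sketch of the refcounting API; 'my_feature' and
 * do_feature_work() are hypothetical, not part of this file:
 *
 *      DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *      static_key_slow_inc(&my_feature.key);  // 0 -> 1 patches the branches
 *      ...
 *      if (static_branch_unlikely(&my_feature))
 *              do_feature_work();
 *      ...
 *      static_key_slow_dec(&my_feature.key);  // 1 -> 0 patches them back
 */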

void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * See static_key_slow_inc().
                 */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        cpus_read_lock();
        static_key_enable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0))
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        cpus_read_lock();
        static_key_disable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
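
/*
 * Note: static_key_enable()/static_key_disable() treat the key as a plain
 * boolean and WARN when they see a refcount other than 0 or 1; they are
 * not meant to be mixed with the slow_inc()/slow_dec() refcounting above.
 */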

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
                                             unsigned long rate_limit,
                                             struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
                                  unsigned long rate_limit,
                                  struct delayed_work *work)
{
        cpus_read_lock();
        __static_key_slow_dec_cpuslocked(key, rate_limit, work);
        cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);

        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
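
/*
 * Deferred keys batch the expensive disable path via delayed work.
 * A sketch, with a hypothetical 'my_def_key':
 *
 *      static struct static_key_deferred my_def_key;
 *
 *      jump_label_rate_limit(&my_def_key, HZ);     // delay decs ~1 second
 *      static_key_slow_inc(&my_def_key.key);
 *      static_key_slow_dec_deferred(&my_def_key);  // queued, not immediate
 */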

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
            entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}
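
/*
 * Bit 0 of entry->key encodes the branch flavour at the call site
 * (static_branch_likely() vs static_branch_unlikely()); masking it off
 * recovers the struct static_key pointer itself.
 */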
static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction, so we use an
 * access function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}
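
/*
 * Truth table for the XOR above (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1):
 *
 *      enabled  branch  instruction
 *      ----------------------------
 *      false    false   NOP
 *      false    true    JMP
 *      true     false   JMP
 *      true     true    NOP
 */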

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections.
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);
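
        /*
         * Sorting groups entries that share a key; the walk below then only
         * needs to record the first entry of each run in its static_key.
         */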
        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}
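
/*
 * One node per module that references the key from outside the module
 * that defines it; mod == NULL denotes the entries residing in the core
 * kernel image (see __jump_label_mod_update()).
 */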
struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        preempt_enable();

        if (!mod)
                return 0;

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop);
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}
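
/*
 * Associate @mod's jump entries with their keys. Entries whose key lives
 * inside @mod are linked directly from the key; entries referencing a
 * foreign key are chained onto it via a static_key_mod node, converting
 * the key to linked form on first use.
 */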
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }

                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;

                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}
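
/*
 * Once a module is live its __init text is freed; zero the code address of
 * any entry in that range so __jump_label_update() skips it from then on.
 */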
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        case MODULE_STATE_LIVE:
                jump_label_invalidate_module_init(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */