test-ww_mutex.c

/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */
#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

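/*
 * Worker side of the basic mutual-exclusion test: announce readiness, wait
 * for the "go" signal, then take the mutex either by spinning on
 * ww_mutex_trylock() (TEST_MTX_TRY) or by a blocking ww_mutex_lock() with no
 * acquire context, and signal "done" while still holding the lock.
 */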
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

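/*
 * Main side of the basic test: grab the lock (with or without an acquire
 * context, depending on TEST_MTX_CTX), release the worker, and check that it
 * cannot signal "done" while we still hold the mutex -- either by polling
 * completion_done() for TIMEOUT jiffies (TEST_MTX_SPIN) or via
 * wait_for_completion_timeout().  A non-zero result means the worker acquired
 * the lock concurrently, i.e. mutual exclusion was violated.
 */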
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

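/* Run the basic test for every combination of the TEST_MTX_* flags. */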
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

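/*
 * AA (self-)deadlock test: with the lock held under an acquire context,
 * ww_mutex_trylock() must fail and a recursive ww_mutex_lock() with the same
 * context must return -EALREADY rather than deadlocking.
 */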
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

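/*
 * ABBA deadlock test: this thread takes A then B while a worker takes B then
 * A.  Without backoff (resolve == false) exactly one side must observe
 * -EDEADLK; with backoff (resolve == true) the losing side releases its lock,
 * reacquires it with ww_mutex_lock_slow() and both sides must succeed.
 */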
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

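/*
 * Cyclic deadlock test: nthreads workers each hold their own a_mutex and then
 * try to take the next worker's a_mutex, closing a cycle.  Every worker is
 * expected to resolve the resulting -EDEADLK by backing off, reacquiring the
 * contended lock with ww_mutex_lock_slow() and retaking its own lock.
 */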
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);

	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

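/*
 * Stress tests: hammer a shared array of ww_mutexes from several workers
 * until a jiffies deadline, in different acquisition patterns.
 */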
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

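/* Return a kmalloc'ed random permutation of the indices 0..count-1. */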
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

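/*
 * Acquire all locks in one fixed random order under a single acquire context,
 * backing off and reacquiring the contended lock with ww_mutex_lock_slow() on
 * -EDEADLK, then drop everything and repeat until the timeout expires.
 */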
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

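/*
 * Reorder stressor: keep the locks on a list in random order and take them in
 * list order each pass.  On -EDEADLK, drop the locks already held, reacquire
 * the contended one with ww_mutex_lock_slow() and move it to the front of the
 * list so the next pass starts from it.
 */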
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

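/*
 * Single-lock stressor: repeatedly lock and unlock one randomly chosen mutex
 * without an acquire context, interleaving with the other workers.
 */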
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

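/*
 * Allocate nlocks shared mutexes and queue nthreads stress works, cycling
 * through the stressor types selected in flags; each work runs for roughly
 * two seconds (2*HZ) before freeing itself.
 */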
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

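/*
 * Module init: run each test in turn on an unbound workqueue; any failure
 * aborts module load with the offending error code.
 */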
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");