test-iosched.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7
  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * The test scheduler allows to test the block device by dispatching
  13. * specific requests according to the test case and declare PASS/FAIL
  14. * according to the requests completion error code.
  15. * Each test is exposed via debugfs and can be triggered by writing to
  16. * the debugfs file.
  17. *
  18. */
  19. /* elevator test iosched */
  20. #include <linux/blkdev.h>
  21. #include <linux/elevator.h>
  22. #include <linux/bio.h>
  23. #include <linux/module.h>
  24. #include <linux/slab.h>
  25. #include <linux/init.h>
  26. #include <linux/debugfs.h>
  27. #include <linux/test-iosched.h>
  28. #include <linux/delay.h>
  29. #include "blk.h"
#define MODULE_NAME "test-iosched"
/* Starting request id for read/write test requests */
#define WR_RD_START_REQ_ID 1234
/* Starting request id for unique (flush/discard/sanitize) test requests */
#define UNIQUE_START_REQ_ID 5678
/* Default watchdog timeout used when a test supplies no timeout_msec */
#define TIMEOUT_TIMER_MS 40000
/* Upper bound on re-runs of a test round that was ignored (FS interference) */
#define TEST_MAX_TESTCASE_ROUNDS 15
/* Logging helpers that prefix every message with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

/* Protects blk_dev_test_list (register/unregister of test utilities) */
static DEFINE_SPINLOCK(blk_dev_test_list_lock);
static LIST_HEAD(blk_dev_test_list);
/* The single global test context; NULL until the scheduler is initialized */
static struct test_data *ptd;
  41. /**
  42. * test_iosched_get_req_queue() - returns the request queue
  43. * served by the scheduler
  44. */
  45. struct request_queue *test_iosched_get_req_queue(void)
  46. {
  47. if (!ptd)
  48. return NULL;
  49. return ptd->req_q;
  50. }
  51. EXPORT_SYMBOL(test_iosched_get_req_queue);
  52. /**
  53. * test_iosched_mark_test_completion() - Wakeup the debugfs
  54. * thread, waiting on the test completion
  55. */
  56. void test_iosched_mark_test_completion(void)
  57. {
  58. if (!ptd)
  59. return;
  60. test_pr_info("%s: mark test is completed, test_count=%d,",
  61. __func__, ptd->test_count);
  62. test_pr_info("%s: reinsert_count=%d, dispatched_count=%d",
  63. __func__, ptd->reinsert_count, ptd->dispatched_count);
  64. ptd->test_state = TEST_COMPLETED;
  65. wake_up(&ptd->wait_q);
  66. }
  67. EXPORT_SYMBOL(test_iosched_mark_test_completion);
  68. /**
  69. * check_test_completion() - Check if all the queued test
  70. * requests were completed
  71. */
  72. void check_test_completion(void)
  73. {
  74. struct test_request *test_rq;
  75. if (!ptd)
  76. goto exit;
  77. list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist)
  78. if (!test_rq->req_completed)
  79. goto exit;
  80. if (!list_empty(&ptd->test_queue)
  81. || !list_empty(&ptd->reinsert_queue)
  82. || !list_empty(&ptd->urgent_queue)) {
  83. test_pr_info("%s: Test still not completed,", __func__);
  84. test_pr_info("%s: test_count=%d, reinsert_count=%d",
  85. __func__, ptd->test_count, ptd->reinsert_count);
  86. test_pr_info("%s: dispatched_count=%d, urgent_count=%d",
  87. __func__, ptd->dispatched_count, ptd->urgent_count);
  88. goto exit;
  89. }
  90. ptd->test_info.test_duration = ktime_sub(ktime_get(),
  91. ptd->test_info.test_duration);
  92. test_pr_info("%s: Test is completed, test_count=%d, reinsert_count=%d,",
  93. __func__, ptd->test_count, ptd->reinsert_count);
  94. test_pr_info("%s: dispatched_count=%d",
  95. __func__, ptd->dispatched_count);
  96. test_iosched_mark_test_completion();
  97. exit:
  98. return;
  99. }
  100. EXPORT_SYMBOL(check_test_completion);
  101. /*
  102. * A callback to be called per bio completion.
  103. * Frees the bio memory.
  104. */
  105. static void end_test_bio(struct bio *bio, int err)
  106. {
  107. if (err)
  108. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  109. bio_put(bio);
  110. }
  111. /*
  112. * A callback to be called per request completion.
  113. * the request memory is not freed here, will be freed later after the test
  114. * results checking.
  115. */
  116. static void end_test_req(struct request *rq, int err)
  117. {
  118. struct test_request *test_rq;
  119. test_rq = (struct test_request *)rq->elv.priv[0];
  120. BUG_ON(!test_rq);
  121. test_pr_debug("%s: request %d completed, err=%d",
  122. __func__, test_rq->req_id, err);
  123. test_rq->req_completed = true;
  124. test_rq->req_result = err;
  125. check_test_completion();
  126. }
/**
 * test_iosched_add_unique_test_req() - Create and queue a non
 * read/write request (such as FLUSH/DISCARD/SANITIZE).
 * @is_err_expcted:	A flag to indicate if this request
 *			should succeed or not
 * @req_unique:		The type of request to add
 * @start_sec:		start address of the first bio
 * @nr_sects:		number of sectors in the request
 * @end_req_io:		specific completion callback. When not
 *			set, the default callback will be used
 *
 * Returns 0 on success, -ENODEV on any allocation failure or when
 * the request type is invalid / the scheduler is not initialized.
 */
int test_iosched_add_unique_test_req(int is_err_expcted,
	enum req_unique_type req_unique,
	int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
{
	struct bio *bio;
	struct request *rq;
	int rw_flags;
	struct test_request *test_rq;

	if (!ptd)
		return -ENODEV;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio) {
		test_pr_err("%s: Failed to allocate a bio", __func__);
		return -ENODEV;
	}
	/* Extra reference so the bio survives until end_test_bio drops it */
	bio_get(bio);
	bio->bi_end_io = end_test_bio;

	/* Translate the unique request type to bio flags */
	switch (req_unique) {
	case REQ_UNIQUE_FLUSH:
		bio->bi_rw = WRITE_FLUSH;
		break;
	case REQ_UNIQUE_DISCARD:
		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
		/* nr_sects is in 512-byte sectors; bi_size is in bytes */
		bio->bi_size = nr_sects << 9;
		bio->bi_sector = start_sec;
		break;
	case REQ_UNIQUE_SANITIZE:
		bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
		break;
	default:
		test_pr_err("%s: Invalid request type %d", __func__,
			req_unique);
		bio_put(bio);
		return -ENODEV;
	}

	rw_flags = bio_data_dir(bio);
	if (bio->bi_rw & REQ_SYNC)
		rw_flags |= REQ_SYNC;

	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
	if (!rq) {
		test_pr_err("%s: Failed to allocate a request", __func__);
		bio_put(bio);
		return -ENODEV;
	}

	/* Attach the bio to the request and copy its attributes */
	init_request_from_bio(rq, bio);
	if (end_req_io)
		rq->end_io = end_req_io;
	else
		rq->end_io = end_test_req;

	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
	if (!test_rq) {
		test_pr_err("%s: Failed to allocate a test request", __func__);
		bio_put(bio);
		blk_put_request(rq);
		return -ENODEV;
	}
	test_rq->req_completed = false;
	test_rq->req_result = -EINVAL;
	test_rq->rq = rq;
	test_rq->is_err_expected = is_err_expcted;
	/* Link the block request back to its test wrapper */
	rq->elv.priv[0] = (void *)test_rq;
	test_rq->req_id = ptd->unique_next_req_id++;

	test_pr_debug(
		"%s: added request %d to the test requests list, type = %d",
		__func__, test_rq->req_id, req_unique);

	/* Queue manipulation is protected by the request queue's lock */
	spin_lock_irq(ptd->req_q->queue_lock);
	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
	ptd->test_count++;
	spin_unlock_irq(ptd->req_q->queue_lock);

	return 0;
}
EXPORT_SYMBOL(test_iosched_add_unique_test_req);
  210. /*
  211. * Get a pattern to be filled in the request data buffer.
  212. * If the pattern used is (-1) the buffer will be filled with sequential
  213. * numbers
  214. */
  215. static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
  216. {
  217. int i = 0;
  218. int num_of_dwords = num_bytes/sizeof(int);
  219. if (pattern == TEST_NO_PATTERN)
  220. return;
  221. /* num_bytes should be aligned to sizeof(int) */
  222. BUG_ON((num_bytes % sizeof(int)) != 0);
  223. if (pattern == TEST_PATTERN_SEQUENTIAL) {
  224. for (i = 0; i < num_of_dwords; i++)
  225. buf[i] = i;
  226. } else {
  227. for (i = 0; i < num_of_dwords; i++)
  228. buf[i] = pattern;
  229. }
  230. }
  231. /**
  232. * test_iosched_create_test_req() - Create a read/write request.
  233. * @is_err_expcted: A flag to indicate if this request
  234. * should succeed or not
  235. * @direction: READ/WRITE
  236. * @start_sec: start address of the first bio
  237. * @num_bios: number of BIOs to be allocated for the
  238. * request
  239. * @pattern: A pattern, to be written into the write
  240. * requests data buffer. In case of READ
  241. * request, the given pattern is kept as
  242. * the expected pattern. The expected
  243. * pattern will be compared in the test
  244. * check result function. If no comparisson
  245. * is required, set pattern to
  246. * TEST_NO_PATTERN.
  247. * @end_req_io: specific completion callback. When not
  248. * set,the default callback will be used
  249. *
  250. * This function allocates the test request and the block
  251. * request and calls blk_rq_map_kern which allocates the
  252. * required BIO. The allocated test request and the block
  253. * request memory is freed at the end of the test and the
  254. * allocated BIO memory is freed by end_test_bio.
  255. */
  256. struct test_request *test_iosched_create_test_req(int is_err_expcted,
  257. int direction, int start_sec,
  258. int num_bios, int pattern, rq_end_io_fn *end_req_io)
  259. {
  260. struct request *rq;
  261. struct test_request *test_rq;
  262. int rw_flags, buf_size;
  263. int ret = 0, i;
  264. unsigned int *bio_ptr = NULL;
  265. struct bio *bio = NULL;
  266. if (!ptd)
  267. return NULL;
  268. rw_flags = direction;
  269. rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
  270. if (!rq) {
  271. test_pr_err("%s: Failed to allocate a request", __func__);
  272. return NULL;
  273. }
  274. test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
  275. if (!test_rq) {
  276. test_pr_err("%s: Failed to allocate test request", __func__);
  277. blk_put_request(rq);
  278. return NULL;
  279. }
  280. buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
  281. test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
  282. if (!test_rq->bios_buffer) {
  283. test_pr_err("%s: Failed to allocate the data buf", __func__);
  284. goto err;
  285. }
  286. test_rq->buf_size = buf_size;
  287. if (direction == WRITE)
  288. fill_buf_with_pattern(test_rq->bios_buffer,
  289. buf_size, pattern);
  290. test_rq->wr_rd_data_pattern = pattern;
  291. bio_ptr = test_rq->bios_buffer;
  292. for (i = 0; i < num_bios; ++i) {
  293. ret = blk_rq_map_kern(ptd->req_q, rq,
  294. (void *)bio_ptr,
  295. sizeof(unsigned int)*BIO_U32_SIZE,
  296. GFP_KERNEL);
  297. if (ret) {
  298. test_pr_err("%s: blk_rq_map_kern returned error %d",
  299. __func__, ret);
  300. goto err;
  301. }
  302. bio_ptr += BIO_U32_SIZE;
  303. }
  304. if (end_req_io)
  305. rq->end_io = end_req_io;
  306. else
  307. rq->end_io = end_test_req;
  308. rq->__sector = start_sec;
  309. rq->cmd_type |= REQ_TYPE_FS;
  310. rq->cmd_flags |= REQ_SORTED;
  311. if (rq->bio) {
  312. rq->bio->bi_sector = start_sec;
  313. rq->bio->bi_end_io = end_test_bio;
  314. bio = rq->bio;
  315. while ((bio = bio->bi_next) != NULL)
  316. bio->bi_end_io = end_test_bio;
  317. }
  318. ptd->num_of_write_bios += num_bios;
  319. test_rq->req_id = ptd->wr_rd_next_req_id++;
  320. test_rq->req_completed = false;
  321. test_rq->req_result = -EINVAL;
  322. test_rq->rq = rq;
  323. if (ptd->test_info.get_rq_disk_fn)
  324. test_rq->rq->rq_disk = ptd->test_info.get_rq_disk_fn();
  325. test_rq->is_err_expected = is_err_expcted;
  326. rq->elv.priv[0] = (void *)test_rq;
  327. test_pr_debug("%s: created test request %d, buf_size=%d",
  328. __func__, test_rq->req_id, buf_size);
  329. return test_rq;
  330. err:
  331. blk_put_request(rq);
  332. kfree(test_rq->bios_buffer);
  333. return NULL;
  334. }
  335. EXPORT_SYMBOL(test_iosched_create_test_req);
  336. /**
  337. * test_iosched_add_wr_rd_test_req() - Create and queue a
  338. * read/write request.
  339. * @is_err_expcted: A flag to indicate if this request
  340. * should succeed or not
  341. * @direction: READ/WRITE
  342. * @start_sec: start address of the first bio
  343. * @num_bios: number of BIOs to be allocated for the
  344. * request
  345. * @pattern: A pattern, to be written into the write
  346. * requests data buffer. In case of READ
  347. * request, the given pattern is kept as
  348. * the expected pattern. The expected
  349. * pattern will be compared in the test
  350. * check result function. If no comparisson
  351. * is required, set pattern to
  352. * TEST_NO_PATTERN.
  353. * @end_req_io: specific completion callback. When not
  354. * set,the default callback will be used
  355. *
  356. * This function allocates the test request and the block
  357. * request and calls blk_rq_map_kern which allocates the
  358. * required BIO. Upon success the new request is added to the
  359. * test_queue. The allocated test request and the block request
  360. * memory is freed at the end of the test and the allocated BIO
  361. * memory is freed by end_test_bio.
  362. */
  363. int test_iosched_add_wr_rd_test_req(int is_err_expcted,
  364. int direction, int start_sec,
  365. int num_bios, int pattern, rq_end_io_fn *end_req_io)
  366. {
  367. struct test_request *test_rq = NULL;
  368. test_rq = test_iosched_create_test_req(is_err_expcted,
  369. direction, start_sec,
  370. num_bios, pattern, end_req_io);
  371. if (test_rq) {
  372. spin_lock_irq(ptd->req_q->queue_lock);
  373. list_add_tail(&test_rq->queuelist, &ptd->test_queue);
  374. ptd->test_count++;
  375. spin_unlock_irq(ptd->req_q->queue_lock);
  376. return 0;
  377. }
  378. return -ENODEV;
  379. }
  380. EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
  381. /* Converts the testcase number into a string */
  382. static char *get_test_case_str(struct test_data *td)
  383. {
  384. if (td->test_info.get_test_case_str_fn)
  385. return td->test_info.get_test_case_str_fn(td);
  386. return "Unknown testcase";
  387. }
  388. /*
  389. * Verify that the test request data buffer includes the expected
  390. * pattern
  391. */
  392. static int compare_buffer_to_pattern(struct test_request *test_rq)
  393. {
  394. int i = 0;
  395. int num_of_dwords = test_rq->buf_size/sizeof(int);
  396. /* num_bytes should be aligned to sizeof(int) */
  397. BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
  398. BUG_ON(test_rq->bios_buffer == NULL);
  399. if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
  400. return 0;
  401. if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
  402. for (i = 0; i < num_of_dwords; i++) {
  403. if (test_rq->bios_buffer[i] != i) {
  404. test_pr_err(
  405. "%s: wrong pattern 0x%x in index %d",
  406. __func__, test_rq->bios_buffer[i], i);
  407. return -EINVAL;
  408. }
  409. }
  410. } else {
  411. for (i = 0; i < num_of_dwords; i++) {
  412. if (test_rq->bios_buffer[i] !=
  413. test_rq->wr_rd_data_pattern) {
  414. test_pr_err(
  415. "%s: wrong pattern 0x%x in index %d",
  416. __func__, test_rq->bios_buffer[i], i);
  417. return -EINVAL;
  418. }
  419. }
  420. }
  421. return 0;
  422. }
/*
 * Determine if the test passed or failed.
 * The function checks every dispatched test request's completion
 * status and result against expectations, verifies READ data
 * patterns, and finally calls the test case's own
 * check_test_result_fn (when provided) for case-specific checks.
 * Sets td->test_result and returns 0 on PASS, negative errno on FAIL.
 */
static int check_test_result(struct test_data *td)
{
	struct test_request *test_rq;
	int res = 0;
	/* Persistent run counter across invocations, for log correlation */
	static int run;

	if (!ptd)
		goto err;

	list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
		if (!test_rq->rq) {
			test_pr_info("%s: req_id %d is contains empty req",
				__func__, test_rq->req_id);
			continue;
		}
		/* Every dispatched request must have completed by now */
		if (!test_rq->req_completed) {
			test_pr_err("%s: rq %d not completed", __func__,
				test_rq->req_id);
			res = -EINVAL;
			goto err;
		}
		/* Unexpected error is a failure... */
		if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
			test_pr_err(
				"%s: rq %d completed with err, not as expected",
				__func__, test_rq->req_id);
			res = -EINVAL;
			goto err;
		}
		/* ...and so is unexpected success */
		if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
			test_pr_err("%s: rq %d succeeded, not as expected",
				__func__, test_rq->req_id);
			res = -EINVAL;
			goto err;
		}
		/* For reads, compare the returned data to the expected pattern */
		if (rq_data_dir(test_rq->rq) == READ) {
			res = compare_buffer_to_pattern(test_rq);
			if (res) {
				test_pr_err("%s: read pattern not as expected",
					__func__);
				res = -EINVAL;
				goto err;
			}
		}
	}

	/* Test-case-specific result validation, if any */
	if (td->test_info.check_test_result_fn) {
		res = td->test_info.check_test_result_fn(td);
		if (res)
			goto err;
	}

	test_pr_info("%s: %s, run# %03d, PASSED",
		__func__, get_test_case_str(td), ++run);
	td->test_result = TEST_PASSED;
	return 0;
err:
	test_pr_err("%s: %s, run# %03d, FAILED",
		__func__, get_test_case_str(td), ++run);
	td->test_result = TEST_FAILED;
	return res;
}
  486. /* Create and queue the required requests according to the test case */
  487. static int prepare_test(struct test_data *td)
  488. {
  489. int ret = 0;
  490. if (td->test_info.prepare_test_fn) {
  491. ret = td->test_info.prepare_test_fn(td);
  492. return ret;
  493. }
  494. return 0;
  495. }
  496. /* Run the test */
  497. static int run_test(struct test_data *td)
  498. {
  499. int ret = 0;
  500. if (td->test_info.run_test_fn) {
  501. ret = td->test_info.run_test_fn(td);
  502. return ret;
  503. }
  504. blk_run_queue(td->req_q);
  505. return 0;
  506. }
/*
 * free_test_queue() - Free all allocated test requests in the given
 * test_queue: their block requests, BIOs and data buffers.
 * @test_queue	the test queue to be freed
 */
static void free_test_queue(struct list_head *test_queue)
{
	struct test_request *test_rq;
	struct bio *bio;

	while (!list_empty(test_queue)) {
		test_rq = list_entry(test_queue->next, struct test_request,
				queuelist);

		list_del_init(&test_rq->queuelist);
		/*
		 * If the request was not completed we need to free its BIOs
		 * and remove it from the packed list
		 */
		if (!test_rq->req_completed) {
			test_pr_info(
				"%s: Freeing memory of an uncompleted request",
				__func__);
			list_del_init(&test_rq->rq->queuelist);
			/* Drop each BIO still attached to the block request */
			while ((bio = test_rq->rq->bio) != NULL) {
				test_rq->rq->bio = bio->bi_next;
				bio_put(bio);
			}
		}
		blk_put_request(test_rq->rq);
		kfree(test_rq->bios_buffer);
		kfree(test_rq);
	}
}
  539. /*
  540. * free_test_requests() - Free all allocated test requests in
  541. * all test queues in given test_data.
  542. * @td The test_data struct whos test requests will be
  543. * freed.
  544. */
  545. static void free_test_requests(struct test_data *td)
  546. {
  547. if (!td)
  548. return;
  549. if (td->urgent_count) {
  550. free_test_queue(&td->urgent_queue);
  551. td->urgent_count = 0;
  552. }
  553. if (td->test_count) {
  554. free_test_queue(&td->test_queue);
  555. td->test_count = 0;
  556. }
  557. if (td->dispatched_count) {
  558. free_test_queue(&td->dispatched_queue);
  559. td->dispatched_count = 0;
  560. }
  561. if (td->reinsert_count) {
  562. free_test_queue(&td->reinsert_queue);
  563. td->reinsert_count = 0;
  564. }
  565. }
  566. /*
  567. * post_test() - Do post test operations. Free the allocated
  568. * test requests, their requests and BIOs buffer.
  569. * @td The test_data struct for the test that has
  570. * ended.
  571. */
  572. static int post_test(struct test_data *td)
  573. {
  574. int ret = 0;
  575. if (td->test_info.post_test_fn)
  576. ret = td->test_info.post_test_fn(td);
  577. ptd->test_info.testcase = 0;
  578. ptd->test_state = TEST_IDLE;
  579. free_test_requests(td);
  580. return ret;
  581. }
  582. /*
  583. * The timer verifies that the test will be completed even if we don't get
  584. * the completion callback for all the requests.
  585. */
  586. static void test_timeout_handler(unsigned long data)
  587. {
  588. struct test_data *td = (struct test_data *)data;
  589. test_pr_info("%s: TIMEOUT timer expired", __func__);
  590. td->test_state = TEST_COMPLETED;
  591. wake_up(&td->wait_q);
  592. return;
  593. }
  594. static unsigned int get_timeout_msec(struct test_data *td)
  595. {
  596. if (td->test_info.timeout_msec)
  597. return td->test_info.timeout_msec;
  598. else
  599. return TIMEOUT_TIMER_MS;
  600. }
/**
 * test_iosched_start_test() - Prepares and runs the test.
 * The members test_duration and test_byte_count of the input
 * parameter t_info are modified by this function.
 * @t_info:	the current test testcase and callbacks functions
 *
 * Runs the test case, waits for its completion (bounded by a watchdog
 * timer) and checks the result. A round disturbed by FS write
 * requests (ignore_round) is retried, up to
 * TEST_MAX_TESTCASE_ROUNDS times.
 *
 * Returns 0 when the test passed; -ENODEV/-EINVAL/-EBUSY or the
 * underlying failure code otherwise.
 */
int test_iosched_start_test(struct test_info *t_info)
{
	int ret = 0;
	unsigned timeout_msec;
	int counter = 0;
	char *test_name = NULL;

	if (!ptd)
		return -ENODEV;

	if (!t_info) {
		ptd->test_result = TEST_FAILED;
		return -EINVAL;
	}

	do {
		if (ptd->ignore_round)
			/*
			 * We ignored the last run due to FS write requests.
			 * Sleep to allow those requests to be issued
			 */
			msleep(2000);

		spin_lock(&ptd->lock);

		/* Only one test may run at a time */
		if (ptd->test_state != TEST_IDLE) {
			test_pr_info(
				"%s: Another test is running, try again later",
				__func__);
			spin_unlock(&ptd->lock);
			return -EBUSY;
		}

		if (ptd->start_sector == 0) {
			test_pr_err("%s: Invalid start sector", __func__);
			ptd->test_result = TEST_FAILED;
			spin_unlock(&ptd->lock);
			return -EINVAL;
		}

		/* Snapshot the caller's test description and reset state */
		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));

		ptd->test_result = TEST_NO_RESULT;
		ptd->num_of_write_bios = 0;

		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;

		ptd->ignore_round = false;
		ptd->fs_wr_reqs_during_test = false;

		ptd->test_state = TEST_RUNNING;

		spin_unlock(&ptd->lock);
		/*
		 * Give an already dispatch request from
		 * FS a chanse to complete
		 */
		msleep(2000);

		/* Arm the watchdog so a stuck test still terminates */
		timeout_msec = get_timeout_msec(ptd);
		mod_timer(&ptd->timeout_timer, jiffies +
			  msecs_to_jiffies(timeout_msec));

		if (ptd->test_info.get_test_case_str_fn)
			test_name = ptd->test_info.get_test_case_str_fn(ptd);
		else
			test_name = "Unknown testcase";
		test_pr_info("%s: Starting test %s", __func__, test_name);

		ret = prepare_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to prepare the test\n",
				    __func__);
			goto error;
		}

		/* test_duration holds the start time until completion */
		ptd->test_info.test_duration = ktime_get();
		ret = run_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to run the test\n", __func__);
			goto error;
		}

		test_pr_info("%s: Waiting for the test completion", __func__);

		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
		del_timer_sync(&ptd->timeout_timer);

		/* Copy the (possibly updated) test info back to the caller */
		memcpy(t_info, &ptd->test_info, sizeof(struct test_info));

		ret = check_test_result(ptd);
		if (ret) {
			test_pr_err("%s: check_test_result failed\n",
				    __func__);
			goto error;
		}

		ret = post_test(ptd);
		if (ret) {
			test_pr_err("%s: post_test failed\n", __func__);
			goto error;
		}

		/*
		 * Wakeup the queue thread to fetch FS requests that might got
		 * postponded due to the test
		 */
		blk_run_queue(ptd->req_q);

		if (ptd->ignore_round)
			test_pr_info(
			"%s: Round canceled (Got wr reqs in the middle)",
			__func__);

		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
			test_pr_info("%s: Too many rounds, did not succeed...",
			     __func__);
			ptd->test_result = TEST_FAILED;
		}

	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));

	if (ptd->test_result == TEST_PASSED)
		return 0;
	else
		return -EINVAL;

error:
	post_test(ptd);
	ptd->test_result = TEST_FAILED;
	return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
  717. /**
  718. * test_iosched_register() - register a block device test
  719. * utility.
  720. * @bdt: the block device test type to register
  721. */
  722. void test_iosched_register(struct blk_dev_test_type *bdt)
  723. {
  724. spin_lock(&blk_dev_test_list_lock);
  725. list_add_tail(&bdt->list, &blk_dev_test_list);
  726. spin_unlock(&blk_dev_test_list_lock);
  727. }
  728. EXPORT_SYMBOL_GPL(test_iosched_register);
  729. /**
  730. * test_iosched_unregister() - unregister a block device test
  731. * utility.
  732. * @bdt: the block device test type to unregister
  733. */
  734. void test_iosched_unregister(struct blk_dev_test_type *bdt)
  735. {
  736. spin_lock(&blk_dev_test_list_lock);
  737. list_del_init(&bdt->list);
  738. spin_unlock(&blk_dev_test_list_lock);
  739. }
  740. EXPORT_SYMBOL_GPL(test_iosched_unregister);
  741. /**
  742. * test_iosched_set_test_result() - Set the test
  743. * result(PASS/FAIL)
  744. * @test_result: the test result
  745. */
  746. void test_iosched_set_test_result(int test_result)
  747. {
  748. if (!ptd)
  749. return;
  750. ptd->test_result = test_result;
  751. }
  752. EXPORT_SYMBOL(test_iosched_set_test_result);
  753. /**
  754. * test_iosched_set_ignore_round() - Set the ignore_round flag
  755. * @ignore_round: A flag to indicate if this test round
  756. * should be ignored and re-run
  757. */
  758. void test_iosched_set_ignore_round(bool ignore_round)
  759. {
  760. if (!ptd)
  761. return;
  762. ptd->ignore_round = ignore_round;
  763. }
  764. EXPORT_SYMBOL(test_iosched_set_ignore_round);
  765. /**
  766. * test_iosched_get_debugfs_tests_root() - returns the root
  767. * debugfs directory for the test_iosched tests
  768. */
  769. struct dentry *test_iosched_get_debugfs_tests_root(void)
  770. {
  771. if (!ptd)
  772. return NULL;
  773. return ptd->debug.debug_tests_root;
  774. }
  775. EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
  776. /**
  777. * test_iosched_get_debugfs_utils_root() - returns the root
  778. * debugfs directory for the test_iosched utils
  779. */
  780. struct dentry *test_iosched_get_debugfs_utils_root(void)
  781. {
  782. if (!ptd)
  783. return NULL;
  784. return ptd->debug.debug_utils_root;
  785. }
  786. EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
  787. static int test_debugfs_init(struct test_data *td)
  788. {
  789. td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
  790. if (!td->debug.debug_root)
  791. return -ENOENT;
  792. td->debug.debug_tests_root = debugfs_create_dir("tests",
  793. td->debug.debug_root);
  794. if (!td->debug.debug_tests_root)
  795. goto err;
  796. td->debug.debug_utils_root = debugfs_create_dir("utils",
  797. td->debug.debug_root);
  798. if (!td->debug.debug_utils_root)
  799. goto err;
  800. td->debug.debug_test_result = debugfs_create_u32(
  801. "test_result",
  802. S_IRUGO | S_IWUGO,
  803. td->debug.debug_utils_root,
  804. &td->test_result);
  805. if (!td->debug.debug_test_result)
  806. goto err;
  807. td->debug.start_sector = debugfs_create_u32(
  808. "start_sector",
  809. S_IRUGO | S_IWUGO,
  810. td->debug.debug_utils_root,
  811. &td->start_sector);
  812. if (!td->debug.start_sector)
  813. goto err;
  814. return 0;
  815. err:
  816. debugfs_remove_recursive(td->debug.debug_root);
  817. return -ENOENT;
  818. }
/* Remove the entire test-iosched debugfs tree (root and all children). */
static void test_debugfs_cleanup(struct test_data *td)
{
	debugfs_remove_recursive(td->debug.debug_root);
}
  823. static void print_req(struct request *req)
  824. {
  825. struct bio *bio;
  826. struct test_request *test_rq;
  827. if (!req)
  828. return;
  829. test_rq = (struct test_request *)req->elv.priv[0];
  830. if (test_rq) {
  831. test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
  832. __func__, test_rq->req_id, (unsigned long)req->__sector);
  833. test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
  834. __func__, req->nr_phys_segments, blk_rq_sectors(req));
  835. bio = req->bio;
  836. test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
  837. __func__, bio->bi_size,
  838. (unsigned long)bio->bi_sector);
  839. while ((bio = bio->bi_next) != NULL) {
  840. test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
  841. __func__, bio->bi_size,
  842. (unsigned long)bio->bi_sector);
  843. }
  844. }
  845. }
/*
 * Elevator merge callback: @next has been merged into @rq, so unlink
 * @next from whatever scheduler list still holds it.
 */
static void test_merged_requests(struct request_queue *q,
	struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
  851. /*
  852. * test_dispatch_from(): Dispatch request from @queue to the @dispatched_queue.
  853. * Also update th dispatched_count counter.
  854. */
  855. static int test_dispatch_from(struct request_queue *q,
  856. struct list_head *queue, unsigned int *count)
  857. {
  858. struct test_request *test_rq;
  859. struct request *rq;
  860. int ret = 0;
  861. if (!ptd)
  862. goto err;
  863. spin_lock_irq(&ptd->lock);
  864. if (!list_empty(queue)) {
  865. test_rq = list_entry(queue->next, struct test_request,
  866. queuelist);
  867. rq = test_rq->rq;
  868. if (!rq) {
  869. pr_err("%s: null request,return", __func__);
  870. spin_unlock_irq(&ptd->lock);
  871. goto err;
  872. }
  873. list_move_tail(&test_rq->queuelist, &ptd->dispatched_queue);
  874. ptd->dispatched_count++;
  875. (*count)--;
  876. spin_unlock_irq(&ptd->lock);
  877. print_req(rq);
  878. elv_dispatch_sort(q, rq);
  879. ptd->test_info.test_byte_count += test_rq->buf_size;
  880. ret = 1;
  881. goto err;
  882. }
  883. spin_unlock_irq(&ptd->lock);
  884. err:
  885. return ret;
  886. }
  887. /*
  888. * Dispatch a test request in case there is a running test Otherwise, dispatch
  889. * a request that was queued by the FS to keep the card functional.
  890. */
  891. static int test_dispatch_requests(struct request_queue *q, int force)
  892. {
  893. struct test_data *td = q->elevator->elevator_data;
  894. struct request *rq = NULL;
  895. int ret = 0;
  896. switch (td->test_state) {
  897. case TEST_IDLE:
  898. if (!list_empty(&td->queue)) {
  899. rq = list_entry(td->queue.next, struct request,
  900. queuelist);
  901. list_del_init(&rq->queuelist);
  902. elv_dispatch_sort(q, rq);
  903. ret = 1;
  904. goto exit;
  905. }
  906. break;
  907. case TEST_RUNNING:
  908. if (test_dispatch_from(q, &td->urgent_queue,
  909. &td->urgent_count)) {
  910. test_pr_debug("%s: Dispatched from urgent_count=%d",
  911. __func__, ptd->urgent_count);
  912. ret = 1;
  913. goto exit;
  914. }
  915. if (test_dispatch_from(q, &td->reinsert_queue,
  916. &td->reinsert_count)) {
  917. test_pr_debug("%s: Dispatched from reinsert_count=%d",
  918. __func__, ptd->reinsert_count);
  919. ret = 1;
  920. goto exit;
  921. }
  922. if (test_dispatch_from(q, &td->test_queue, &td->test_count)) {
  923. test_pr_debug("%s: Dispatched from test_count=%d",
  924. __func__, ptd->test_count);
  925. ret = 1;
  926. goto exit;
  927. }
  928. break;
  929. case TEST_COMPLETED:
  930. default:
  931. break;
  932. }
  933. exit:
  934. return ret;
  935. }
  936. static void test_add_request(struct request_queue *q, struct request *rq)
  937. {
  938. struct test_data *td = q->elevator->elevator_data;
  939. list_add_tail(&rq->queuelist, &td->queue);
  940. /*
  941. * The write requests can be followed by a FLUSH request that might
  942. * cause unexpected results of the test.
  943. */
  944. if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
  945. test_pr_debug("%s: got WRITE req in the middle of the test",
  946. __func__);
  947. td->fs_wr_reqs_during_test = true;
  948. }
  949. }
  950. static struct request *
  951. test_former_request(struct request_queue *q, struct request *rq)
  952. {
  953. struct test_data *td = q->elevator->elevator_data;
  954. if (rq->queuelist.prev == &td->queue)
  955. return NULL;
  956. return list_entry(rq->queuelist.prev, struct request, queuelist);
  957. }
  958. static struct request *
  959. test_latter_request(struct request_queue *q, struct request *rq)
  960. {
  961. struct test_data *td = q->elevator->elevator_data;
  962. if (rq->queuelist.next == &td->queue)
  963. return NULL;
  964. return list_entry(rq->queuelist.next, struct request, queuelist);
  965. }
  966. static void *test_init_queue(struct request_queue *q)
  967. {
  968. struct blk_dev_test_type *__bdt;
  969. ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
  970. q->node);
  971. if (!ptd) {
  972. test_pr_err("%s: failed to allocate test data", __func__);
  973. return NULL;
  974. }
  975. memset((void *)ptd, 0, sizeof(struct test_data));
  976. INIT_LIST_HEAD(&ptd->queue);
  977. INIT_LIST_HEAD(&ptd->test_queue);
  978. INIT_LIST_HEAD(&ptd->dispatched_queue);
  979. INIT_LIST_HEAD(&ptd->reinsert_queue);
  980. INIT_LIST_HEAD(&ptd->urgent_queue);
  981. init_waitqueue_head(&ptd->wait_q);
  982. ptd->req_q = q;
  983. setup_timer(&ptd->timeout_timer, test_timeout_handler,
  984. (unsigned long)ptd);
  985. spin_lock_init(&ptd->lock);
  986. if (test_debugfs_init(ptd)) {
  987. test_pr_err("%s: Failed to create debugfs files", __func__);
  988. return NULL;
  989. }
  990. list_for_each_entry(__bdt, &blk_dev_test_list, list)
  991. __bdt->init_fn();
  992. return ptd;
  993. }
  994. static void test_exit_queue(struct elevator_queue *e)
  995. {
  996. struct test_data *td = e->elevator_data;
  997. struct blk_dev_test_type *__bdt;
  998. BUG_ON(!list_empty(&td->queue));
  999. list_for_each_entry(__bdt, &blk_dev_test_list, list)
  1000. __bdt->exit_fn();
  1001. test_debugfs_cleanup(td);
  1002. kfree(td);
  1003. }
/**
 * test_get_test_data() - Returns a pointer to the test_data
 * struct which keeps the current test data.
 *
 * May return NULL if the test-iosched elevator has not been
 * initialized on any queue yet.
 */
struct test_data *test_get_test_data(void)
{
	return ptd;
}
EXPORT_SYMBOL(test_get_test_data);
  1014. static bool test_urgent_pending(struct request_queue *q)
  1015. {
  1016. return !list_empty(&ptd->urgent_queue);
  1017. }
  1018. /**
  1019. * test_iosched_add_urgent_req() - Add an urgent test_request.
  1020. * First mark the request as urgent, then add it to the
  1021. * urgent_queue test queue.
  1022. * @test_rq: pointer to the urgent test_request to be
  1023. * added.
  1024. *
  1025. */
  1026. void test_iosched_add_urgent_req(struct test_request *test_rq)
  1027. {
  1028. spin_lock_irq(&ptd->lock);
  1029. test_rq->rq->cmd_flags |= REQ_URGENT;
  1030. list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
  1031. ptd->urgent_count++;
  1032. spin_unlock_irq(&ptd->lock);
  1033. }
  1034. EXPORT_SYMBOL(test_iosched_add_urgent_req);
/**
 * test_reinsert_req() - Moves the @rq request from
 * @dispatched_queue into @reinsert_queue.
 * The @rq must be in @dispatched_queue
 * @q: request queue
 * @rq: request to be inserted
 *
 * Returns 0 on success, -EINVAL when there is no test data, the
 * dispatched queue is empty, or @rq is not found on it.
 */
static int test_reinsert_req(struct request_queue *q,
		struct request *rq)
{
	struct test_request *test_rq;
	int ret = -EINVAL;

	if (!ptd)
		goto exit;

	if (list_empty(&ptd->dispatched_queue)) {
		test_pr_err("%s: dispatched_queue is empty", __func__);
		goto exit;
	}

	/*
	 * NOTE(review): dispatched_queue is walked and modified here
	 * without taking ptd->lock, unlike test_dispatch_from().
	 * Presumably the caller's context makes this safe — confirm
	 * before relying on it.
	 */
	list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
		if (test_rq->rq == rq) {
			/* Found it: move the entry and fix the counters. */
			list_move(&test_rq->queuelist, &ptd->reinsert_queue);
			ptd->dispatched_count--;
			ptd->reinsert_count++;
			ret = 0;
			break;
		}
	}
exit:
	return ret;
}
/*
 * Elevator operations table for the "test-iosched" scheduler.
 * The dispatch/add/merge hooks route requests through the test queues;
 * init/exit manage the per-queue test_data lifecycle; the urgent and
 * reinsert hooks are the extended callbacks used by the block-device
 * test drivers.
 */
static struct elevator_type elevator_test_iosched = {
	.ops = {
		.elevator_merge_req_fn = test_merged_requests,
		.elevator_dispatch_fn = test_dispatch_requests,
		.elevator_add_req_fn = test_add_request,
		.elevator_former_req_fn = test_former_request,
		.elevator_latter_req_fn = test_latter_request,
		.elevator_init_fn = test_init_queue,
		.elevator_exit_fn = test_exit_queue,
		.elevator_is_urgent_fn = test_urgent_pending,
		.elevator_reinsert_req_fn = test_reinsert_req,
	},
	.elevator_name = "test-iosched",
	.elevator_owner = THIS_MODULE,
};
  1082. static int __init test_init(void)
  1083. {
  1084. elv_register(&elevator_test_iosched);
  1085. return 0;
  1086. }
/* Module exit: unregister the test-iosched elevator. */
static void __exit test_exit(void)
{
	elv_unregister(&elevator_test_iosched);
}

module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Test IO scheduler");