mq-deadline.c

/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        struct list_head dispatch;
};
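
/*
 * Return the sort_list rbtree matching the data direction (READ/WRITE) of @rq.
 */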
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}
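
/*
 * Callback from the block layer after a bio has been merged into @req.
 */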
static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}
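
/*
 * Callback from the block layer when two queued requests have been merged;
 * @next has been folded into @req and is about to go away.
 */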
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_rq[WRITE])
                rq = dd->next_rq[WRITE];
        else
                rq = dd->next_rq[READ];

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */
        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */
        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = dd->next_rq[data_dir];
        }

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        rq->rq_flags |= RQF_STARTED;
        return rq;
}
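
/*
 * Dispatch entry point: take dd->lock and let __dd_dispatch_request() pick
 * the next request to hand back to the blk-mq core.
 */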
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(hctx);
        spin_unlock(&dd->lock);

        return rq;
}
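
/*
 * Release the elevator private data; both fifo lists must be empty by now.
 */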
static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}
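
/*
 * Look for a request that @bio can be front-merged into, i.e. a queued
 * request that begins exactly where @bio ends.
 */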
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}
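
/*
 * Try to merge @bio into an already queued request, under dd->lock. A
 * request made redundant by the merge is freed after the lock is dropped.
 */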
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        blk_mq_sched_request_inserted(rq);

        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}
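
/*
 * Insert a list of requests, taking dd->lock once for the whole batch.
 */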
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}
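
/*
 * Tell the blk-mq core whether any requests are queued. list_empty_careful()
 * is used because this check runs without dd->lock held.
 */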
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,          \
                                          loff_t *pos)                 \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
                                         loff_t *pos)                  \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                  \
                                          struct seq_file *m)          \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                         \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
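
/*
 * Elevator operations wired up for the blk-mq scheduling framework.
 */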
static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests       = dd_insert_requests,
                .dispatch_request      = dd_dispatch_request,
                .next_request          = elv_rb_latter_request,
                .former_request        = elv_rb_former_request,
                .bio_merge             = dd_bio_merge,
                .request_merge         = dd_request_merge,
                .requests_merged       = dd_merged_requests,
                .request_merged        = dd_request_merged,
                .has_work              = dd_has_work,
                .init_sched            = dd_init_queue,
                .exit_sched            = dd_exit_queue,
        },
        .uses_mq        = true,
#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_owner = THIS_MODULE,
};
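
/* Allow the scheduler module to also be requested as "mq-deadline-iosched". */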
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");