/*
 * IOPS based IO scheduler. Based on CFQ.
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *  Shaohua Li <shli@kernel.org>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"

#define VIOS_SCALE_SHIFT 10
#define VIOS_SCALE (1 << VIOS_SCALE_SHIFT)

#define VIOS_READ_SCALE (1)
#define VIOS_WRITE_SCALE (1)
#define VIOS_SYNC_SCALE (2)
#define VIOS_ASYNC_SCALE (5)

#define VIOS_PRIO_SCALE (5)
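
/*
 * vios ("virtual I/O") accounting is done in fixed point: one nominal I/O
 * costs VIOS_SCALE = 1 << 10 units, so the read/write and sync/async scale
 * ratios above can express fractional per-request costs without floating
 * point.
 */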
struct fiops_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;

	u64 min_vios;
};
#define FIOPS_RB_ROOT (struct fiops_rb_root) { .rb = RB_ROOT }

enum wl_prio_t {
	IDLE_WORKLOAD = 0,
	BE_WORKLOAD = 1,
	RT_WORKLOAD = 2,
	FIOPS_PRIO_NR,
};
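
/*
 * Per-queue scheduler data. There is one service tree per workload class;
 * in_flight[] is indexed by rq_is_sync(rq), so in_flight[0] counts async
 * and in_flight[1] counts sync requests currently at the driver.
 */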
struct fiops_data {
	struct request_queue *queue;

	struct fiops_rb_root service_tree[FIOPS_PRIO_NR];

	unsigned int busy_queues;
	unsigned int in_flight[2];

	struct work_struct unplug_work;

	unsigned int read_scale;
	unsigned int write_scale;
	unsigned int sync_scale;
	unsigned int async_scale;
};

struct fiops_ioc {
	struct io_cq icq;

	unsigned int flags;
	struct fiops_data *fiopsd;
	struct rb_node rb_node;
	u64 vios; /* key in service_tree */
	struct fiops_rb_root *service_tree;

	unsigned int in_flight;

	struct rb_root sort_list;
	struct list_head fifo;

	pid_t pid;
	unsigned short ioprio;
	enum wl_prio_t wl_type;
};

#define ioc_service_tree(ioc) (&((ioc)->fiopsd->service_tree[(ioc)->wl_type]))
#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)

enum ioc_state_flags {
	FIOPS_IOC_FLAG_on_rr = 0,	/* on round-robin busy list */
	FIOPS_IOC_FLAG_prio_changed,	/* task priority has changed */
};
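
/*
 * Generate fiops_mark_ioc_*(), fiops_clear_ioc_*() and fiops_ioc_*()
 * helpers for each of the state flags above.
 */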
#define FIOPS_IOC_FNS(name)						\
static inline void fiops_mark_ioc_##name(struct fiops_ioc *ioc)	\
{									\
	ioc->flags |= (1 << FIOPS_IOC_FLAG_##name);			\
}									\
static inline void fiops_clear_ioc_##name(struct fiops_ioc *ioc)	\
{									\
	ioc->flags &= ~(1 << FIOPS_IOC_FLAG_##name);			\
}									\
static inline int fiops_ioc_##name(const struct fiops_ioc *ioc)	\
{									\
	return ((ioc)->flags & (1 << FIOPS_IOC_FLAG_##name)) != 0;	\
}

FIOPS_IOC_FNS(on_rr);
FIOPS_IOC_FNS(prio_changed);
#undef FIOPS_IOC_FNS

#define fiops_log_ioc(fiopsd, ioc, fmt, args...)	\
	blk_add_trace_msg((fiopsd)->queue, "ioc%d " fmt, (ioc)->pid, ##args)
#define fiops_log(fiopsd, fmt, args...)	\
	blk_add_trace_msg((fiopsd)->queue, "fiops " fmt, ##args)

enum wl_prio_t fiops_wl_type(short prio_class)
{
	if (prio_class == IOPRIO_CLASS_RT)
		return RT_WORKLOAD;
	if (prio_class == IOPRIO_CLASS_BE)
		return BE_WORKLOAD;
	return IDLE_WORKLOAD;
}

static inline struct fiops_ioc *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct fiops_ioc, icq);
}

static inline struct fiops_ioc *fiops_cic_lookup(struct fiops_data *fiopsd,
						 struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, fiopsd->queue));
	return NULL;
}

/*
 * The below is the leftmost-cache rbtree addon.
 */
static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct fiops_ioc, rb_node);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
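
/*
 * Take the maximum of two vios keys using a signed delta, so the
 * comparison stays correct even if the u64 counters eventually wrap.
 */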
static inline u64 max_vios(u64 min_vios, u64 vios)
{
	s64 delta = (s64)(vios - min_vios);
	if (delta > 0)
		min_vios = vios;

	return min_vios;
}

static void fiops_update_min_vios(struct fiops_rb_root *service_tree)
{
	struct fiops_ioc *ioc;

	ioc = fiops_rb_first(service_tree);
	if (!ioc)
		return;
	service_tree->min_vios = max_vios(service_tree->min_vios, ioc->vios);
}

/*
 * The fiopsd->service_tree[] trees hold all pending fiops_ioc's that have
 * requests waiting to be processed, sorted in the order in which we will
 * service the queues.
 */
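/*
 * An ioc added with nothing in flight starts at the tree's min_vios, so a
 * task that idled for a while cannot bank vios credit and then monopolize
 * the disk; an ioc that still has requests in flight keeps its own key.
 */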
static void fiops_service_tree_add(struct fiops_data *fiopsd,
				   struct fiops_ioc *ioc)
{
	struct rb_node **p, *parent;
	struct fiops_ioc *__ioc;
	struct fiops_rb_root *service_tree = ioc_service_tree(ioc);
	u64 vios;
	int left;

	/* Newly added ioc */
	if (RB_EMPTY_NODE(&ioc->rb_node)) {
		if (ioc->in_flight > 0)
			vios = ioc->vios;
		else
			vios = max_vios(service_tree->min_vios, ioc->vios);
	} else {
		vios = ioc->vios;
		/* ioc->service_tree might not equal service_tree */
		fiops_rb_erase(&ioc->rb_node, ioc->service_tree);
		ioc->service_tree = NULL;
	}

	fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios);

	left = 1;
	parent = NULL;
	ioc->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__ioc = rb_entry(parent, struct fiops_ioc, rb_node);

		/*
		 * sort by key, which represents service time.
		 */
		if (vios < __ioc->vios)
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &ioc->rb_node;

	ioc->vios = vios;
	rb_link_node(&ioc->rb_node, parent, p);
	rb_insert_color(&ioc->rb_node, &service_tree->rb);
	service_tree->count++;

	fiops_update_min_vios(service_tree);
}

/*
 * Update ioc's position in the service tree.
 */
static void fiops_resort_rr_list(struct fiops_data *fiopsd,
				 struct fiops_ioc *ioc)
{
	/*
	 * Resorting requires the ioc to be on the RR list already.
	 */
	if (fiops_ioc_on_rr(ioc))
		fiops_service_tree_add(fiopsd, ioc);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void fiops_add_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc)
{
	BUG_ON(fiops_ioc_on_rr(ioc));
	fiops_mark_ioc_on_rr(ioc);

	fiopsd->busy_queues++;

	fiops_resort_rr_list(fiopsd, ioc);
}

/*
 * Called when the ioc no longer has requests pending, remove it from
 * the service tree.
 */
static void fiops_del_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc)
{
	BUG_ON(!fiops_ioc_on_rr(ioc));
	fiops_clear_ioc_on_rr(ioc);

	if (!RB_EMPTY_NODE(&ioc->rb_node)) {
		fiops_rb_erase(&ioc->rb_node, ioc->service_tree);
		ioc->service_tree = NULL;
	}

	BUG_ON(!fiopsd->busy_queues);
	fiopsd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void fiops_del_rq_rb(struct request *rq)
{
	struct fiops_ioc *ioc = RQ_CIC(rq);

	elv_rb_del(&ioc->sort_list, rq);
}

static void fiops_add_rq_rb(struct request *rq)
{
	struct fiops_ioc *ioc = RQ_CIC(rq);
	struct fiops_data *fiopsd = ioc->fiopsd;

	elv_rb_add(&ioc->sort_list, rq);

	if (!fiops_ioc_on_rr(ioc))
		fiops_add_ioc_rr(fiopsd, ioc);
}

static void fiops_reposition_rq_rb(struct fiops_ioc *ioc, struct request *rq)
{
	elv_rb_del(&ioc->sort_list, rq);
	fiops_add_rq_rb(rq);
}

static void fiops_remove_request(struct request *rq)
{
	list_del_init(&rq->queuelist);
	fiops_del_rq_rb(rq);
}
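
/*
 * Cost of one request in vios. With the default scales (read 1, write 1,
 * sync 2, async 5) a sync read costs VIOS_SCALE = 1024 while an async
 * write costs 1024 * 5 / 2 = 2560; the ioprio term then discounts
 * high-priority tasks and surcharges low-priority ones (ioprio 0 pays
 * roughly 1/5 of the base cost, ioprio 7 about 8/5).
 */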
static u64 fiops_scaled_vios(struct fiops_data *fiopsd,
			     struct fiops_ioc *ioc, struct request *rq)
{
	int vios = VIOS_SCALE;

	if (rq_data_dir(rq) == WRITE)
		vios = vios * fiopsd->write_scale / fiopsd->read_scale;

	if (!rq_is_sync(rq))
		vios = vios * fiopsd->async_scale / fiopsd->sync_scale;

	vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;

	return vios;
}

/* return vios dispatched */
static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
				  struct fiops_ioc *ioc)
{
	struct request *rq;
	struct request_queue *q = fiopsd->queue;

	rq = rq_entry_fifo(ioc->fifo.next);

	fiops_remove_request(rq);
	elv_dispatch_add_tail(q, rq);

	fiopsd->in_flight[rq_is_sync(rq)]++;
	ioc->in_flight++;

	return fiops_scaled_vios(fiopsd, ioc, rq);
}

static int fiops_forced_dispatch(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc;
	int dispatched = 0;
	int i;

	for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
		while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
			ioc = fiops_rb_first(&fiopsd->service_tree[i]);

			while (!list_empty(&ioc->fifo)) {
				fiops_dispatch_request(fiopsd, ioc);
				dispatched++;
			}
			if (fiops_ioc_on_rr(ioc))
				fiops_del_ioc_rr(fiopsd, ioc);
		}
	}
	return dispatched;
}

static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc;
	struct fiops_rb_root *service_tree = NULL;
	int i;
	struct request *rq;

	for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
		if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
			service_tree = &fiopsd->service_tree[i];
			break;
		}
	}

	if (!service_tree)
		return NULL;

	ioc = fiops_rb_first(service_tree);

	rq = rq_entry_fifo(ioc->fifo.next);
	/*
	 * This is the only async task while sync requests are in flight;
	 * delay it a moment. If other tasks keep arriving, sync tasks have
	 * no chance of being starved, so don't delay.
	 */
	if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 &&
	    service_tree->count == 1) {
		fiops_log_ioc(fiopsd, ioc,
			      "postpone async, in_flight async %d sync %d",
			      fiopsd->in_flight[0], fiopsd->in_flight[1]);
		return NULL;
	}

	return ioc;
}

static void fiops_charge_vios(struct fiops_data *fiopsd,
			      struct fiops_ioc *ioc, u64 vios)
{
	struct fiops_rb_root *service_tree = ioc->service_tree;

	ioc->vios += vios;

	fiops_log_ioc(fiopsd, ioc, "charge vios %lld, new vios %lld",
		      vios, ioc->vios);

	if (RB_EMPTY_ROOT(&ioc->sort_list))
		fiops_del_ioc_rr(fiopsd, ioc);
	else
		fiops_resort_rr_list(fiopsd, ioc);

	fiops_update_min_vios(service_tree);
}
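
/*
 * One dispatch round: pick the ioc with the smallest vios key, move one
 * request from its FIFO onto the dispatch list, then charge the scaled
 * cost so the ioc is re-sorted behind queues that have consumed less.
 */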
static int fiops_dispatch_requests(struct request_queue *q, int force)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *ioc;
	u64 vios;

	if (unlikely(force))
		return fiops_forced_dispatch(fiopsd);

	ioc = fiops_select_ioc(fiopsd);
	if (!ioc)
		return 0;

	vios = fiops_dispatch_request(fiopsd, ioc);

	fiops_charge_vios(fiopsd, ioc, vios);
	return 1;
}

static void fiops_init_prio_data(struct fiops_ioc *cic)
{
	struct task_struct *tsk = current;
	struct io_context *ioc = cic->icq.ioc;
	int ioprio_class;

	if (!fiops_ioc_prio_changed(cic))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "fiops: bad prio %x\n", ioprio_class);
		/* fall through */
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cic->ioprio = task_nice_ioprio(tsk);
		cic->wl_type = fiops_wl_type(task_nice_ioclass(tsk));
		break;
	case IOPRIO_CLASS_RT:
		cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio);
		cic->wl_type = fiops_wl_type(IOPRIO_CLASS_RT);
		break;
	case IOPRIO_CLASS_BE:
		cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio);
		cic->wl_type = fiops_wl_type(IOPRIO_CLASS_BE);
		break;
	case IOPRIO_CLASS_IDLE:
		cic->wl_type = fiops_wl_type(IOPRIO_CLASS_IDLE);
		cic->ioprio = 7;
		break;
	}

	fiops_clear_ioc_prio_changed(cic);
}
static void fiops_insert_request(struct request_queue *q, struct request *rq)
{
	struct fiops_ioc *ioc = RQ_CIC(rq);

	fiops_init_prio_data(ioc);

	list_add_tail(&rq->queuelist, &ioc->fifo);

	fiops_add_rq_rb(rq);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void fiops_schedule_dispatch(struct fiops_data *fiopsd)
{
	if (fiopsd->busy_queues)
		kblockd_schedule_work(&fiopsd->unplug_work);
}

static void fiops_completed_request(struct request_queue *q, struct request *rq)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *ioc = RQ_CIC(rq);

	fiopsd->in_flight[rq_is_sync(rq)]--;
	ioc->in_flight--;

	fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d",
		      ioc->in_flight, fiopsd->busy_queues);

	if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0)
		fiops_schedule_dispatch(fiopsd);
}

static struct request *
fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct fiops_ioc *cic;

	cic = fiops_cic_lookup(fiopsd, tsk->io_context);

	if (cic)
		return elv_rb_find(&cic->sort_list, bio_end_sector(bio));

	return NULL;
}

static enum elv_merge fiops_merge(struct request_queue *q, struct request **req,
				  struct bio *bio)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = fiops_find_rq_fmerge(fiopsd, bio);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void fiops_merged_request(struct request_queue *q, struct request *req,
				 enum elv_merge type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct fiops_ioc *ioc = RQ_CIC(req);

		fiops_reposition_rq_rb(ioc, req);
	}
}
static void
fiops_merged_requests(struct request_queue *q, struct request *rq,
		      struct request *next)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *ioc;

	fiops_remove_request(next);

	ioc = RQ_CIC(next);
	/*
	 * all requests of this task are merged into other tasks, delete it
	 * from the service tree.
	 */
	if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list))
		fiops_del_ioc_rr(fiopsd, ioc);
}
static int fiops_allow_bio_merge(struct request_queue *q, struct request *rq,
				 struct bio *bio)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *cic;

	/*
	 * Lookup the ioc that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = fiops_cic_lookup(fiopsd, current->io_context);

	return cic == RQ_CIC(rq);
}

static int fiops_allow_rq_merge(struct request_queue *q, struct request *rq,
				struct request *next)
{
	return RQ_CIC(rq) == RQ_CIC(next);
}

static void fiops_exit_queue(struct elevator_queue *e)
{
	struct fiops_data *fiopsd = e->elevator_data;

	cancel_work_sync(&fiopsd->unplug_work);

	kfree(fiopsd);
}

static void fiops_kick_queue(struct work_struct *work)
{
	struct fiops_data *fiopsd =
		container_of(work, struct fiops_data, unplug_work);
	struct request_queue *q = fiopsd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

static int fiops_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct fiops_data *fiopsd;
	int i;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node);
	if (!fiopsd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = fiopsd;

	fiopsd->queue = q;
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++)
		fiopsd->service_tree[i] = FIOPS_RB_ROOT;

	INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue);

	fiopsd->read_scale = VIOS_READ_SCALE;
	fiopsd->write_scale = VIOS_WRITE_SCALE;
	fiopsd->sync_scale = VIOS_SYNC_SCALE;
	fiopsd->async_scale = VIOS_ASYNC_SCALE;

	return 0;
}

static void fiops_init_icq(struct io_cq *icq)
{
	struct fiops_data *fiopsd = icq->q->elevator->elevator_data;
	struct fiops_ioc *ioc = icq_to_cic(icq);

	RB_CLEAR_NODE(&ioc->rb_node);
	INIT_LIST_HEAD(&ioc->fifo);
	ioc->sort_list = RB_ROOT;

	ioc->fiopsd = fiopsd;

	ioc->pid = current->pid;
	fiops_mark_ioc_prio_changed(ioc);
}

/*
 * sysfs parts below -->
 */
static ssize_t
fiops_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
fiops_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR)					\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct fiops_data *fiopsd = e->elevator_data;			\
	return fiops_var_show(__VAR, (page));				\
}
SHOW_FUNCTION(fiops_read_scale_show, fiopsd->read_scale);
SHOW_FUNCTION(fiops_write_scale_show, fiopsd->write_scale);
SHOW_FUNCTION(fiops_sync_scale_show, fiopsd->sync_scale);
SHOW_FUNCTION(fiops_async_scale_show, fiopsd->async_scale);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct fiops_data *fiopsd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = fiops_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(fiops_read_scale_store, &fiopsd->read_scale, 1, 100);
STORE_FUNCTION(fiops_write_scale_store, &fiopsd->write_scale, 1, 100);
STORE_FUNCTION(fiops_sync_scale_store, &fiopsd->sync_scale, 1, 100);
STORE_FUNCTION(fiops_async_scale_store, &fiopsd->async_scale, 1, 100);
#undef STORE_FUNCTION
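
/*
 * With this scheduler selected, the four scales should be tunable at
 * runtime under /sys/block/<dev>/queue/iosched/, e.g. raising async_scale
 * makes async I/O proportionally more expensive relative to sync I/O.
 */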
#define FIOPS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, fiops_##name##_show, fiops_##name##_store)

static struct elv_fs_entry fiops_attrs[] = {
	FIOPS_ATTR(read_scale),
	FIOPS_ATTR(write_scale),
	FIOPS_ATTR(sync_scale),
	FIOPS_ATTR(async_scale),
	__ATTR_NULL
};

static struct elevator_type iosched_fiops = {
	.ops.sq = {
		.elevator_merge_fn = fiops_merge,
		.elevator_merged_fn = fiops_merged_request,
		.elevator_merge_req_fn = fiops_merged_requests,
		.elevator_allow_bio_merge_fn = fiops_allow_bio_merge,
		.elevator_allow_rq_merge_fn = fiops_allow_rq_merge,
		.elevator_dispatch_fn = fiops_dispatch_requests,
		.elevator_add_req_fn = fiops_insert_request,
		.elevator_completed_req_fn = fiops_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_init_icq_fn = fiops_init_icq,
		.elevator_init_fn = fiops_init_queue,
		.elevator_exit_fn = fiops_exit_queue,
	},
	.icq_size = sizeof(struct fiops_ioc),
	.icq_align = __alignof__(struct fiops_ioc),
	.elevator_attrs = fiops_attrs,
	.elevator_name = "fiops",
	.elevator_owner = THIS_MODULE,
};

static int __init fiops_init(void)
{
	return elv_register(&iosched_fiops);
}

static void __exit fiops_exit(void)
{
	elv_unregister(&iosched_fiops);
}

module_init(fiops_init);
module_exit(fiops_exit);

MODULE_AUTHOR("Jens Axboe, Shaohua Li <shli@kernel.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IOPS based IO scheduler");