blk-mq-debugfs.c

/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
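
/*
 * Print the bits that are set in @flags, separated by '|', using the name
 * from @flag_name[] when one is defined for a bit and the bit number
 * otherwise.
 */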
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(STACKABLE),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
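
/*
 * Accepts the commands "run", "start" and "kick". A usage sketch, assuming
 * debugfs is mounted at /sys/kernel/debug and the queue belongs to a disk
 * named sda:
 *
 *	echo run > /sys/kernel/debug/block/sda/state
 */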
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}
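
/*
 * q->poll_stat[] interleaves read and write statistics: entry 2 * b holds
 * the read stat and entry 2 * b + 1 the write stat for the bucket of
 * requests of roughly (512 << b) bytes.
 */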
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(TAG_WAITING),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME
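
/*
 * The tag allocation policy is encoded in hctx->flags; print it by name and
 * mask its bits out of the flag dump that follows.
 */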
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME

#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
static const char *const rqaf_name[] = {
	RQAF_NAME(COMPLETE),
	RQAF_NAME(STARTED),
	RQAF_NAME(POLL_SLEPT),
};
#undef RQAF_NAME
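
/*
 * Dump a single request: operation, flags, tags, plus whatever
 * driver-specific detail the optional ->show_rq() callback adds.
 */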
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_puts(m, ", .atomic_flags=");
	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
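
/*
 * The request-list attributes ("requeue_list", "dispatch" and the per-ctx
 * "rq_list") share one pattern: take the list lock in ->start(), walk the
 * list with seq_list_next() and drop the lock in ->stop().
 */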
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
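
/*
 * hctx->tags and hctx->sched_tags can change underneath us (e.g. across an
 * elevator switch), so the four attributes below take q->sysfs_lock before
 * dereferencing them.
 */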
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
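
/*
 * The counter attributes below (io_poll, dispatched, queued, run) print
 * their statistics on read and reset them on any write.
 */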
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start = ctx_rq_list_start,
	.next = ctx_rq_list_next,
	.stop = ctx_rq_list_stop,
	.show = blk_mq_debugfs_rq_show,
};
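
/*
 * rq_dispatched[] and rq_completed[] keep sync requests at index 1 and
 * async requests at index 0; the sync count is printed first.
 */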
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
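
/*
 * debugfs plumbing: each attribute file keeps its blk_mq_debugfs_attr in
 * inode->i_private, and the object the file operates on (queue, hctx or
 * ctx) lives in the i_private of the parent directory's inode.
 */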
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{"poll_stat", 0400, queue_poll_stat_show},
	{"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
	{"state", 0600, queue_state_show, queue_state_write},
	{"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}
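
/*
 * Create the debugfs directory for @q under <debugfs>/block/, named after
 * the disk the queue belongs to, and populate it with the queue attributes.
 */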
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
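
/*
 * Elevators may supply their own debugfs attributes; when they do, those
 * attributes appear in a "sched" subdirectory of the queue and hctx
 * directories.
 */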
int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}