// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "drbd debugfs: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/jiffies.h>
#include <linux/list.h>

#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_debugfs.h"


/**********************************************************************
 * Whenever you change the file format, remember to bump the version. *
 **********************************************************************/

static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
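
/* Print "\t<age in ms>" if valid, "\t-" otherwise. */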
static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
{
	if (valid)
		seq_printf(m, "\t%d", jiffies_to_msecs(dt));
	else
		seq_printf(m, "\t-");
}

static void __seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name, const char *unset_name)
{
	if (is_set && set_name) {
		seq_putc(m, *sep);
		seq_puts(m, set_name);
		*sep = '|';
	} else if (!is_set && unset_name) {
		seq_putc(m, *sep);
		seq_puts(m, unset_name);
		*sep = '|';
	}
}

static void seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name)
{
	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}

/* pretty print enum drbd_req_state_bits req->rq_state */
static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
{
	unsigned int s = req->rq_state;
	char sep = ' ';

	seq_printf(m, "\t0x%08x", s);
	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");

	/* RQ_WRITE ignored, already reported */
	seq_puts(m, "\tlocal:");
	seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
	seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
	seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
	if (sep == ' ')
		seq_puts(m, " -");

	/* for_each_connection ... */
	seq_printf(m, "\tnet:");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
	seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
	seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
	seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
	seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
	if (sep == ' ')
		seq_puts(m, " -");

	seq_printf(m, " :");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
	seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
	seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
	if (sep == ' ')
		seq_puts(m, " -");
	seq_printf(m, "\n");
}
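
/* One request per line: epoch, sector, size, rw, then the timing and state
 * columns announced by the RQ_HDR_* fragments below. */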
static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	/* change anything here, fixup header below! */
	unsigned int s = req->rq_state;

#define RQ_HDR_1 "epoch\tsector\tsize\trw"
	seq_printf(m, "0x%x\t%llu\t%u\t%s",
		req->epoch,
		(unsigned long long)req->i.sector, req->i.size >> 9,
		(s & RQ_WRITE) ? "W" : "R");

#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);

#define RQ_HDR_3 "\tsent\tacked\tdone"
	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);

#define RQ_HDR_4 "\tstate\n"
	seq_print_request_state(m, req);
}
#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4

static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
	seq_print_one_request(m, req, now);
}
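
/* Dump the currently pending (in_use) meta-data IO, if any, for each volume
 * of the resource. */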
static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		struct drbd_md_io tmp;
		/* In theory this is racy,
		 * in the sense that there could have been a
		 * drbd_md_put_buffer(); drbd_md_get_buffer();
		 * between accessing these members here.  */
		tmp = device->md_io;
		if (atomic_read(&tmp.in_use)) {
			seq_printf(m, "%u\t%u\t%d\t",
				device->minor, device->vnr,
				jiffies_to_msecs(now - tmp.start_jif));
			if (time_before(tmp.submit_jif, tmp.start_jif))
				seq_puts(m, "-\t");
			else
				seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
			seq_printf(m, "%s\n", tmp.current_use);
		}
	}
	rcu_read_unlock();
}
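
/* Per volume: age of the oldest request still waiting for an activity log
 * transaction, and how many requests are waiting. */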
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		unsigned long jif;
		struct drbd_request *req;
		int n = atomic_read(&device->ap_actlog_cnt);
		if (n) {
			spin_lock_irq(&device->resource->req_lock);
			req = list_first_entry_or_null(&device->pending_master_completion[1],
				struct drbd_request, req_pending_master_completion);
			/* if the oldest request does not wait for the activity log
			 * it is not interesting for us here */
			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
				jif = req->start_jif;
			else
				req = NULL;
			spin_unlock_irq(&device->resource->req_lock);
		}
		if (n) {
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
			if (req)
				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
			else
				seq_puts(m, "-\t");
			seq_printf(m, "%u\n", n);
		}
	}
	rcu_read_unlock();
}
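
/* Print the oldest, not yet completed bitmap IO of a device, if there is one. */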
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
	struct drbd_bm_aio_ctx *ctx;
	unsigned long start_jif;
	unsigned int in_flight;
	unsigned int flags;

	spin_lock_irq(&device->resource->req_lock);
	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
	if (ctx && ctx->done)
		ctx = NULL;
	if (ctx) {
		start_jif = ctx->start_jif;
		in_flight = atomic_read(&ctx->in_flight);
		flags = ctx->flags;
	}
	spin_unlock_irq(&device->resource->req_lock);
	if (ctx) {
		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
			device->minor, device->vnr,
			(flags & BM_AIO_READ) ? 'R' : 'W',
			jiffies_to_msecs(now - start_jif),
			in_flight);
	}
}

static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		seq_print_device_bitmap_io(m, device, now);
	}
	rcu_read_unlock();
}

/* pretty print enum peer_req->flags */
static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
{
	unsigned long f = peer_req->flags;
	char sep = ' ';

	__seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
	__seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
	seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
	seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
	seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
	seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
	seq_putc(m, '\n');
}

static void seq_print_peer_request(struct seq_file *m,
	struct drbd_device *device, struct list_head *lh,
	unsigned long now)
{
	bool reported_preparing = false;
	struct drbd_peer_request *peer_req;

	list_for_each_entry(peer_req, lh, w.list) {
		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
			continue;

		if (device)
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

		seq_printf(m, "%llu\t%u\t%c\t%u\t",
			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
			jiffies_to_msecs(now - peer_req->submit_jif));
		seq_print_peer_request_flags(m, peer_req);
		if (peer_req->flags & EE_SUBMITTED)
			break;
		else
			reported_preparing = true;
	}
}
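
/* Dump peer requests from the active, read and sync lists, and a pending
 * flush, if any. */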
static void seq_print_device_peer_requests(struct seq_file *m,
	struct drbd_device *device, unsigned long now)
{
	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
	spin_lock_irq(&device->resource->req_lock);
	seq_print_peer_request(m, device, &device->active_ee, now);
	seq_print_peer_request(m, device, &device->read_ee, now);
	seq_print_peer_request(m, device, &device->sync_ee, now);
	spin_unlock_irq(&device->resource->req_lock);
	if (test_bit(FLUSH_PENDING, &device->flags)) {
		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
			device->minor, device->vnr,
			jiffies_to_msecs(now - device->flush_jif));
	}
}

static void seq_print_resource_pending_peer_requests(struct seq_file *m,
	struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		seq_print_device_peer_requests(m, device, now);
	}
	rcu_read_unlock();
}
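
/* Walk the transfer log and print only requests that add new state
 * information, to help tell local disk problems from network problems. */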
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
	struct drbd_resource *resource,
	struct drbd_connection *connection,
	unsigned long now)
{
	struct drbd_request *req;
	unsigned int count = 0;
	unsigned int show_state = 0;

	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
		unsigned int tmp = 0;
		unsigned int s;
		++count;

		/* don't disable irq "forever" */
		if (!(count & 0x1ff)) {
			struct drbd_request *req_next;
			kref_get(&req->kref);
			spin_unlock_irq(&resource->req_lock);
			cond_resched();
			spin_lock_irq(&resource->req_lock);
			req_next = list_next_entry(req, tl_requests);
			if (kref_put(&req->kref, drbd_req_destroy))
				req = req_next;
			if (&req->tl_requests == &connection->transfer_log)
				break;
		}

		s = req->rq_state;

		/* This is meant to summarize timing issues, to be able to tell
		 * local disk problems from network problems.
		 * Skip requests, if we have shown an even older request with
		 * similar aspects already.  */
		if (req->master_bio == NULL)
			tmp |= 1;
		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
			tmp |= 2;
		if (s & RQ_NET_MASK) {
			if (!(s & RQ_NET_SENT))
				tmp |= 4;
			if (s & RQ_NET_PENDING)
				tmp |= 8;
			if (!(s & RQ_NET_DONE))
				tmp |= 16;
		}
		if ((tmp & show_state) == tmp)
			continue;
		show_state |= tmp;
		seq_printf(m, "%u\t", count);
		seq_print_minor_vnr_req(m, req, now);
		if (show_state == 0x1f)
			break;
	}
	spin_unlock_irq(&resource->req_lock);
}

/* TODO: transfer_log and friends should be moved to resource */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
	struct drbd_resource *resource = m->private;
	struct drbd_connection *connection;
	unsigned long jif = jiffies;

	connection = first_connection(resource);
	/* This does not happen, actually.
	 * But be robust and prepare for future code changes. */
	if (!connection || !kref_get_unless_zero(&connection->kref))
		return -ESTALE;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "oldest bitmap IO\n");
	seq_print_resource_pending_bitmap_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "meta data IO\n");
	seq_print_resource_pending_meta_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "socket buffer stats\n");
	/* for each connection ... once we have more than one */
	rcu_read_lock();
	if (connection->data.socket) {
		/* open coded SIOCINQ, the "relevant" part */
		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
		int answ = tp->rcv_nxt - tp->copied_seq;
		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
		/* open coded SIOCOUTQ, the "relevant" part */
		answ = tp->write_seq - tp->snd_una;
		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
	}
	rcu_read_unlock();
	seq_putc(m, '\n');

	seq_puts(m, "oldest peer requests\n");
	seq_print_resource_pending_peer_requests(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "application requests waiting for activity log\n");
	seq_print_waiting_for_AL(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "oldest application requests\n");
	seq_print_resource_transfer_log_summary(m, resource, connection, jif);
	seq_putc(m, '\n');

	jif = jiffies - jif;
	if (jif)
		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

/* make sure at *open* time that the respective object won't go away. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
		void *data, struct kref *kref,
		void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_path.dentry->d_parent;
	/* serialize with d_delete() */
	inode_lock(d_inode(parent));
	/* Make sure the object is still alive */
	if (simple_positive(file->f_path.dentry)
	&& kref_get_unless_zero(kref))
		ret = 0;
	inode_unlock(d_inode(parent));
	if (!ret) {
		ret = single_open(file, show, data);
		if (ret)
			kref_put(kref, release);
	}
	return ret;
}

static int in_flight_summary_open(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;

	return drbd_single_open(file, in_flight_summary_show, resource,
				&resource->kref, drbd_destroy_resource);
}

static int in_flight_summary_release(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;

	kref_put(&resource->kref, drbd_destroy_resource);
	return single_release(inode, file);
}

static const struct file_operations in_flight_summary_fops = {
	.owner		= THIS_MODULE,
	.open		= in_flight_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= in_flight_summary_release,
};
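
/* Create drbd/resources/$name/ with the in_flight_summary file and the
 * "volumes" and "connections" subdirectories. */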
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
	struct dentry *dentry;

	if (!drbd_debugfs_resources)
		return;

	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res = dentry;

	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_volumes = dentry;

	dentry = debugfs_create_dir("connections", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_connections = dentry;

	dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
			resource->debugfs_res, resource,
			&in_flight_summary_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_in_flight_summary = dentry;
	return;

fail:
	drbd_debugfs_resource_cleanup(resource);
	drbd_err(resource, "failed to create debugfs dentry\n");
}

static void drbd_debugfs_remove(struct dentry **dp)
{
	debugfs_remove(*dp);
	*dp = NULL;
}

void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}

static void seq_print_one_timing_detail(struct seq_file *m,
	const struct drbd_thread_timing_details *tdp,
	unsigned long now)
{
	struct drbd_thread_timing_details td;
	/* No locking...
	 * use temporary assignment to get at consistent data. */
	do {
		td = *tdp;
	} while (td.cb_nr != tdp->cb_nr);
	if (!td.cb_addr)
		return;
	seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
			td.cb_nr,
			jiffies_to_msecs(now - td.start_jif),
			td.caller_fn, td.line,
			td.cb_addr);
}

static void seq_print_timing_details(struct seq_file *m,
		const char *title,
		unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
{
	unsigned int start_idx;
	unsigned int i;

	seq_printf(m, "%s\n", title);
	/* If not much is going on, this will result in natural ordering.
	 * If it is very busy, we will possibly skip events, or even see wrap
	 * arounds, which could only be avoided with locking.
	 */
	start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
	for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
		seq_print_one_timing_detail(m, tdp+i, now);
	for (i = 0; i < start_idx; i++)
		seq_print_one_timing_detail(m, tdp+i, now);
}

static int callback_history_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long jif = jiffies;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "n\tage\tcallsite\tfn\n");
	seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
	seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
	return 0;
}

static int callback_history_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;

	return drbd_single_open(file, callback_history_show, connection,
				&connection->kref, drbd_destroy_connection);
}

static int callback_history_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;

	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_callback_history_fops = {
	.owner		= THIS_MODULE,
	.open		= callback_history_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= callback_history_release,
};

static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	spin_lock_irq(&connection->resource->req_lock);
	r1 = connection->req_next;
	if (r1)
		seq_print_minor_vnr_req(m, r1, now);
	r2 = connection->req_ack_pending;
	if (r2 && r2 != r1) {
		r1 = r2;
		seq_print_minor_vnr_req(m, r1, now);
	}
	r2 = connection->req_not_net_done;
	if (r2 && r2 != r1)
		seq_print_minor_vnr_req(m, r2, now);
	spin_unlock_irq(&connection->resource->req_lock);
	return 0;
}

static int connection_oldest_requests_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;

	return drbd_single_open(file, connection_oldest_requests_show, connection,
				&connection->kref, drbd_destroy_connection);
}

static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;

	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_oldest_requests_fops = {
	.owner		= THIS_MODULE,
	.open		= connection_oldest_requests_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= connection_oldest_requests_release,
};
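
/* Create drbd/resources/$name/connections/peer/ with the callback_history
 * and oldest_requests files. */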
void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
	struct dentry *dentry;

	if (!conns_dir)
		return;

	/* Once we enable multiple peers,
	 * these connections will have descriptive names.
	 * For now, it is just the one connection to the (only) "peer". */
	dentry = debugfs_create_dir("peer", conns_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn = dentry;

	dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_callback_history_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_callback_history = dentry;

	dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_oldest_requests_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_oldest_requests = dentry;
	return;

fail:
	drbd_debugfs_connection_cleanup(connection);
	drbd_err(connection, "failed to create debugfs dentry\n");
}

void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
	drbd_debugfs_remove(&connection->debugfs_conn);
}
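
/* Show rs_left and the NO_WRITES/LOCKED/PRIORITY flags of one resync extent. */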
static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
{
	struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);

	seq_printf(m, "%5d %s %s %s", bme->rs_left,
		   test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
		   test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
		   test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
		   );
}

static int device_resync_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->resync);
		lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
		put_ldev(device);
	}
	return 0;
}

static int device_act_log_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->act_log);
		lc_seq_dump_details(m, device->act_log, "", NULL);
		put_ldev(device);
	}
	return 0;
}

static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_resource *resource = device->resource;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;
	int i;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	/* WRITE, then READ */
	for (i = 1; i >= 0; --i) {
		r1 = list_first_entry_or_null(&device->pending_master_completion[i],
			struct drbd_request, req_pending_master_completion);
		r2 = list_first_entry_or_null(&device->pending_completion[i],
			struct drbd_request, req_pending_local);
		if (r1)
			seq_print_one_request(m, r1, now);
		if (r2 && r2 != r1)
			seq_print_one_request(m, r2, now);
	}
	spin_unlock_irq(&resource->req_lock);
	return 0;
}

static int device_data_gen_id_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_md *md;
	enum drbd_uuid_index idx;

	if (!get_ldev_if_state(device, D_FAILED))
		return -ENODEV;

	md = &device->ldev->md;
	spin_lock_irq(&md->uuid_lock);
	for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
		seq_printf(m, "0x%016llX\n", md->uuid[idx]);
	}
	spin_unlock_irq(&md->uuid_lock);
	put_ldev(device);
	return 0;
}

static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
	return 0;
}

#define drbd_debugfs_device_attr(name)						\
static int device_ ## name ## _open(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	return drbd_single_open(file, device_ ## name ## _show, device,	\
				&device->kref, drbd_destroy_device);		\
}										\
static int device_ ## name ## _release(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	kref_put(&device->kref, drbd_destroy_device);				\
	return single_release(inode, file);					\
}										\
static const struct file_operations device_ ## name ## _fops = {		\
	.owner		= THIS_MODULE,						\
	.open		= device_ ## name ## _open,				\
	.read		= seq_read,						\
	.llseek		= seq_lseek,						\
	.release	= device_ ## name ## _release,				\
};

drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
drbd_debugfs_device_attr(ed_gen_id)
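
/* Create drbd/resources/$name/volumes/$vnr/ with the per-device attribute
 * files, and a drbd/minors/$minor symlink pointing at that directory. */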
void drbd_debugfs_device_add(struct drbd_device *device)
{
	struct dentry *vols_dir = device->resource->debugfs_res_volumes;
	char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
	char vnr_buf[8];   /* volume number vnr is even 16 bit only; */
	char *slink_name = NULL;
	struct dentry *dentry;

	if (!vols_dir || !drbd_debugfs_minors)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
	dentry = debugfs_create_dir(vnr_buf, vols_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_vol = dentry;

	snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
	slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
			device->resource->name, device->vnr);
	if (!slink_name)
		goto fail;
	dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
	kfree(slink_name);
	slink_name = NULL;
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_minor = dentry;

#define DCF(name)	do {					\
	dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP,	\
			device->debugfs_vol, device,		\
			&device_ ## name ## _fops);		\
	if (IS_ERR_OR_NULL(dentry))				\
		goto fail;					\
	device->debugfs_vol_ ## name = dentry;			\
	} while (0)

	DCF(oldest_requests);
	DCF(act_log_extents);
	DCF(resync_extents);
	DCF(data_gen_id);
	DCF(ed_gen_id);
#undef DCF
	return;

fail:
	drbd_debugfs_device_cleanup(device);
	drbd_err(device, "failed to create debugfs entries\n");
}

void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
	drbd_debugfs_remove(&device->debugfs_minor);
	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol);
}

void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
	struct dentry *dentry;
	char vnr_buf[8];

	if (!conn_dir)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
	dentry = debugfs_create_dir(vnr_buf, conn_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	peer_device->debugfs_peer_dev = dentry;
	return;

fail:
	drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_err(peer_device, "failed to create debugfs entries\n");
}

void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}

static int drbd_version_show(struct seq_file *m, void *ignored)
{
	seq_printf(m, "# %s\n", drbd_buildtag());
	seq_printf(m, "VERSION=%s\n", REL_VERSION);
	seq_printf(m, "API_VERSION=%u\n", API_VERSION);
	seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
	seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
	return 0;
}

static int drbd_version_open(struct inode *inode, struct file *file)
{
	return single_open(file, drbd_version_show, NULL);
}

static const struct file_operations drbd_version_fops = {
	.owner = THIS_MODULE,
	.open = drbd_version_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};

/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_version);
	drbd_debugfs_remove(&drbd_debugfs_root);
}
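
/* Create the top-level debugfs hierarchy: drbd/version, drbd/resources/
 * and drbd/minors/. */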
int __init drbd_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_dir("drbd", NULL);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_root = dentry;

	dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_version = dentry;

	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_resources = dentry;

	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_minors = dentry;
	return 0;

fail:
	drbd_debugfs_cleanup();
	if (dentry)
		return PTR_ERR(dentry);
	else
		return -EINVAL;
}