/*
 * trace_gfs2.h - GFS2 tracepoint definitions
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include "incore.h"
#include "glock.h"

#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x, \
        dlm_state_name(IV), \
        dlm_state_name(NL), \
        dlm_state_name(CR), \
        dlm_state_name(CW), \
        dlm_state_name(PR), \
        dlm_state_name(PW), \
        dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x, \
        { GFS2_BLKST_FREE, "free" }, \
        { GFS2_BLKST_USED, "used" }, \
        { GFS2_BLKST_DINODE, "dinode" }, \
        { GFS2_BLKST_UNLINKED, "unlinked" })

#define show_glock_flags(flags) __print_flags(flags, "", \
        {(1UL << GLF_LOCK), "l" }, \
        {(1UL << GLF_DEMOTE), "D" }, \
        {(1UL << GLF_PENDING_DEMOTE), "d" }, \
        {(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
        {(1UL << GLF_DIRTY), "y" }, \
        {(1UL << GLF_LFLUSH), "f" }, \
        {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
        {(1UL << GLF_REPLY_PENDING), "r" }, \
        {(1UL << GLF_INITIAL), "I" }, \
        {(1UL << GLF_FROZEN), "F" }, \
        {(1UL << GLF_QUEUED), "q" }, \
        {(1UL << GLF_LRU), "L" }, \
        {(1UL << GLF_OBJECT), "o" })

#ifndef NUMPTY
#define NUMPTY
static inline u8 glock_trace_state(unsigned int state)
{
        switch (state) {
        case LM_ST_SHARED:
                return DLM_LOCK_PR;
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;
        }
        return DLM_LOCK_NL;
}
#endif
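
/*
 * Note on the helper above: glock_trace_state() folds the GFS2 lock states
 * into the DLM lock modes decoded by glock_trace_name(), so a shared glock
 * is reported as "PR", deferred as "CW", exclusive as "EX", and anything
 * else falls back to "NL".
 */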

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */
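
/*
 * A minimal sketch of collecting these events, assuming tracefs is mounted
 * at /sys/kernel/tracing (older kernels expose the same layout under
 * /sys/kernel/debug/tracing):
 *
 *      echo 1 > /sys/kernel/tracing/events/gfs2/enable
 *      cat /sys/kernel/tracing/trace_pipe
 *
 * The latency objectives above can then be measured by pairing related
 * lines, e.g. a gfs2_demote_rq with the next gfs2_glock_state_change for
 * the same glock number.
 */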

/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,
        TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
        TP_ARGS(gl, new_state),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, glnum )
                __field( u32, gltype )
                __field( u8, cur_state )
                __field( u8, new_state )
                __field( u8, dmt_state )
                __field( u8, tgt_state )
                __field( unsigned long, flags )
        ),
        TP_fast_assign(
                __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
                __entry->glnum = gl->gl_name.ln_number;
                __entry->gltype = gl->gl_name.ln_type;
                __entry->cur_state = glock_trace_state(gl->gl_state);
                __entry->new_state = glock_trace_state(new_state);
                __entry->tgt_state = glock_trace_state(gl->gl_target);
                __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
                __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
        ),
        TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
                MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
                (unsigned long long)__entry->glnum,
                glock_trace_name(__entry->cur_state),
                glock_trace_name(__entry->new_state),
                glock_trace_name(__entry->tgt_state),
                glock_trace_name(__entry->dmt_state),
                show_glock_flags(__entry->flags))
);
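
/*
 * The TRACE_EVENT() above generates an inline trace_gfs2_glock_state_change()
 * helper; the glock state machine emits it once the DLM has replied and the
 * new state is being applied, roughly (illustrative call, arguments as in
 * TP_PROTO):
 *
 *      trace_gfs2_glock_state_change(gl, new_state);
 */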

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,
        TP_PROTO(const struct gfs2_glock *gl),
        TP_ARGS(gl),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, glnum )
                __field( u32, gltype )
                __field( u8, cur_state )
                __field( unsigned long, flags )
        ),
        TP_fast_assign(
                __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
                __entry->gltype = gl->gl_name.ln_type;
                __entry->glnum = gl->gl_name.ln_number;
                __entry->cur_state = glock_trace_state(gl->gl_state);
                __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
        ),
        TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                __entry->gltype, (unsigned long long)__entry->glnum,
                glock_trace_name(__entry->cur_state),
                glock_trace_name(DLM_LOCK_IV),
                show_glock_flags(__entry->flags))
);
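
/*
 * Since the glock is going away, the second state printed is always "IV"
 * (DLM_LOCK_IV, i.e. unlocked). A trace line might look like the following
 * (values purely illustrative):
 *
 *      253,3 glock 2:8442 state PR => IV flags:I
 */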

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,
        TP_PROTO(const struct gfs2_glock *gl),
        TP_ARGS(gl),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, glnum )
                __field( u32, gltype )
                __field( u8, cur_state )
                __field( u8, dmt_state )
                __field( unsigned long, flags )
        ),
        TP_fast_assign(
                __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
                __entry->gltype = gl->gl_name.ln_type;
                __entry->glnum = gl->gl_name.ln_number;
                __entry->cur_state = glock_trace_state(gl->gl_state);
                __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
                __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
        ),
        TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
                MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
                (unsigned long long)__entry->glnum,
                glock_trace_name(__entry->cur_state),
                glock_trace_name(__entry->dmt_state),
                show_glock_flags(__entry->flags))
);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,
        TP_PROTO(const struct gfs2_holder *gh, int first),
        TP_ARGS(gh, first),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, glnum )
                __field( u32, gltype )
                __field( int, first )
                __field( u8, state )
        ),
        TP_fast_assign(
                __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
                __entry->glnum = gh->gh_gl->gl_name.ln_number;
                __entry->gltype = gh->gh_gl->gl_name.ln_type;
                __entry->first = first;
                __entry->state = glock_trace_state(gh->gh_state);
        ),
        TP_printk("%u,%u glock %u:%llu promote %s %s",
                MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
                (unsigned long long)__entry->glnum,
                __entry->first ? "first" : "other",
                glock_trace_name(__entry->state))
);
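
/*
 * The "first"/"other" tag in the output reflects the first argument: a
 * non-zero value marks the holder granted first in a promotion pass, while
 * the remaining holders granted in the same pass are reported as "other".
 */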

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,
        TP_PROTO(const struct gfs2_holder *gh, int queue),
        TP_ARGS(gh, queue),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, glnum )
                __field( u32, gltype )
                __field( int, queue )
                __field( u8, state )
        ),
        TP_fast_assign(
                __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
                __entry->glnum = gh->gh_gl->gl_name.ln_number;
                __entry->gltype = gh->gh_gl->gl_name.ln_type;
                __entry->queue = queue;
                __entry->state = glock_trace_state(gh->gh_state);
        ),
        TP_printk("%u,%u glock %u:%llu %squeue %s",
                MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
                (unsigned long long)__entry->glnum,
                __entry->queue ? "" : "de",
                glock_trace_name(__entry->state))
);
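
/*
 * A non-zero queue argument logs "queue" (a holder being added) and zero
 * logs "dequeue" (a holder being removed), in both cases together with the
 * requested lock state.
 */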

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */
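
/*
 * A minimal sketch of meeting the latency objective from user space
 * (tracefs paths assumed as in Section 1): enable the gfs2_log_flush event
 * and pair each "start" line with the following "end" line for the same
 * device; the timestamp difference is the flush time. gfs2_log_blocks
 * supplies the reservation counts for the log usage statistics.
 */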

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,
        TP_PROTO(const struct gfs2_bufdata *bd, int pin),
        TP_ARGS(bd, pin),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( int, pin )
                __field( u32, len )
                __field( sector_t, block )
                __field( u64, ino )
        ),
        TP_fast_assign(
                __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
                __entry->pin = pin;
                __entry->len = bd->bd_bh->b_size;
                __entry->block = bd->bd_bh->b_blocknr;
                __entry->ino = bd->bd_gl->gl_name.ln_number;
        ),
        TP_printk("%u,%u log %s %llu/%lu inode %llu",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                __entry->pin ? "pin" : "unpin",
                (unsigned long long)__entry->block,
                (unsigned long)__entry->len,
                (unsigned long long)__entry->ino)
);

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,
        TP_PROTO(const struct gfs2_sbd *sdp, int start),
        TP_ARGS(sdp, start),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( int, start )
                __field( u64, log_seq )
        ),
        TP_fast_assign(
                __entry->dev = sdp->sd_vfs->s_dev;
                __entry->start = start;
                __entry->log_seq = sdp->sd_log_sequence;
        ),
        TP_printk("%u,%u log flush %s %llu",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                __entry->start ? "start" : "end",
                (unsigned long long)__entry->log_seq)
);

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,
        TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
        TP_ARGS(sdp, blocks),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( int, blocks )
        ),
        TP_fast_assign(
                __entry->dev = sdp->sd_vfs->s_dev;
                __entry->blocks = blocks;
        ),
        TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
                MINOR(__entry->dev), __entry->blocks)
);

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,
        TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),
        TP_ARGS(sdp, wbc, start),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( int, start )
                __field( int, sync_mode )
                __field( long, nr_to_write )
        ),
        TP_fast_assign(
                __entry->dev = sdp->sd_vfs->s_dev;
                __entry->start = start;
                __entry->sync_mode = wbc->sync_mode;
                __entry->nr_to_write = wbc->nr_to_write;
        ),
        TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
                MINOR(__entry->dev), __entry->start ? "start" : "end",
                __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
                __entry->nr_to_write)
);
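
/*
 * sync_mode is taken from the writeback_control: WB_SYNC_ALL is printed as
 * "all" and any other mode as "none", followed by the nr_to_write budget
 * for the flush.
 */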

/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */
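
/*
 * A minimal sketch for the allocator objectives, assuming the tracefs
 * layout from Section 1: standard trace event filters can narrow gfs2_bmap
 * to the interesting cases, e.g.
 *
 *      echo 'create == 1 && errno != 0' > events/gfs2/gfs2_bmap/filter
 *
 * which keeps only mapping attempts that were allowed to allocate and
 * returned an error.
 */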

/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,
        TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
                sector_t lblock, int create, int errno),
        TP_ARGS(ip, bh, lblock, create, errno),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( sector_t, lblock )
                __field( sector_t, pblock )
                __field( u64, inum )
                __field( unsigned long, state )
                __field( u32, len )
                __field( int, create )
                __field( int, errno )
        ),
        TP_fast_assign(
                __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
                __entry->lblock = lblock;
                __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
                __entry->inum = ip->i_no_addr;
                __entry->state = bh->b_state;
                __entry->len = bh->b_size;
                __entry->create = create;
                __entry->errno = errno;
        ),
        TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                (unsigned long long)__entry->inum,
                (unsigned long long)__entry->lblock,
                (unsigned long)__entry->len,
                (unsigned long long)__entry->pblock,
                __entry->state, __entry->create ? "create " : "nocreate",
                __entry->errno)
);
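
/*
 * pblock is recorded as 0 when the buffer head is not mapped, so a line
 * ending in "to 0" indicates an unmapped lookup rather than a real
 * physical block number.
 */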

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,
        TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
                u8 block_state),
        TP_ARGS(ip, block, len, block_state),
        TP_STRUCT__entry(
                __field( dev_t, dev )
                __field( u64, start )
                __field( u64, inum )
                __field( u32, len )
                __field( u8, block_state )
        ),
        TP_fast_assign(
                __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
                __entry->start = block;
                __entry->inum = ip->i_no_addr;
                __entry->len = len;
                __entry->block_state = block_state;
        ),
        TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                (unsigned long long)__entry->inum,
                (unsigned long long)__entry->start,
                (unsigned long)__entry->len,
                block_state_name(__entry->block_state))
);
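
/*
 * block_state is one of the GFS2_BLKST_* values and is decoded by
 * block_state_name() above, so the final field reads "free", "used",
 * "dinode" or "unlinked".
 */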

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>
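
/*
 * Usage note: with the standard tracepoint machinery, exactly one .c file
 * in the filesystem is expected to do
 *
 *      #define CREATE_TRACE_POINTS
 *      #include "trace_gfs2.h"
 *
 * which instantiates the events defined here; every other user simply
 * includes the header and calls the generated trace_gfs2_*() helpers.
 */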