trace_stat.c

/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/tracefs.h>

#include "trace_stat.h"
#include "trace.h"
/*
 * List of stat red-black nodes from a tracer.
 * We use such a tree to quickly sort the stat
 * entries from the tracer.
 */
struct stat_node {
	struct rb_node		node;
	void			*stat;
};
/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;
	struct tracer_stat	*ts;
	struct rb_root		stat_root;
	struct mutex		stat_mutex;
	struct dentry		*file;
};
/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files */
static struct dentry		*stat_dir;
static void __reset_stat_session(struct stat_session *session)
{
	struct stat_node *snode, *n;

	rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
		if (session->ts->stat_release)
			session->ts->stat_release(snode->stat);
		kfree(snode);
	}

	session->stat_root = RB_ROOT;
}

static void reset_stat_session(struct stat_session *session)
{
	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
}

static void destroy_session(struct stat_session *session)
{
	tracefs_remove(session->file);
	__reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}
typedef int (*cmp_stat_t)(void *, void *);

static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct stat_node *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->stat = stat;

	/*
	 * Figure out where to put the new node.
	 * This is a descending sort.
	 */
	while (*new) {
		struct stat_node *this;
		int result;

		this = container_of(*new, struct stat_node, node);
		result = cmp(data->stat, this->stat);

		parent = *new;
		if (result >= 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 0;
}
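
/*
 * Illustrative sketch, not part of the original file: a stat_cmp
 * callback compatible with the descending insertion above.  A
 * comparator that returns a positive value when its first argument is
 * the "bigger" entry makes rb_first()/rb_next() walk the stats from
 * largest to smallest.  The example_stat type and its field are
 * hypothetical.
 */
#if 0
struct example_stat {
	unsigned long	hits;
};

static int example_stat_cmp(void *p1, void *p2)
{
	struct example_stat *a = p1;
	struct example_stat *b = p2;

	if (a->hits > b->hits)
		return 1;
	if (a->hits < b->hits)
		return -1;
	return 0;
}
#endif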
/*
 * For tracers that don't provide a stat_cmp callback.
 * This one will force an insertion as the right-most node
 * in the rbtree.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return -1;
}
/*
 * Initialize the stat rbtree at each trace_stat file opening.
 * All of this copying and sorting is required on every open,
 * since the stats could have changed between two file sessions.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct rb_root *root = &session->stat_root;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	__reset_stat_session(session);

	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		goto exit;

	ret = insert_stat(root, stat, ts->stat_cmp);
	if (ret)
		goto exit;

	/*
	 * Iterate over the tracer stat entries and store them in an rbtree.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		ret = insert_stat(root, stat, ts->stat_cmp);
		if (ret)
			goto exit_free_rbtree;
	}

exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_rbtree:
	__reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}
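
/*
 * Illustrative sketch, not part of the original file: the loop in
 * stat_seq_init() only relies on stat_start() returning the first
 * entry and stat_next(prev, idx) returning the idx-th entry, or NULL
 * once the entries are exhausted.  A tracer whose stats live in a
 * plain array could implement the pair like this; all example_*
 * names and EXAMPLE_NR_STATS are hypothetical, and example_stat is
 * the type sketched after insert_stat() above.
 */
#if 0
#define EXAMPLE_NR_STATS	64

static struct example_stat example_stats[EXAMPLE_NR_STATS];

static void *example_stat_start(struct tracer_stat *trace)
{
	return &example_stats[0];
}

static void *example_stat_next(void *prev, int idx)
{
	if (idx >= EXAMPLE_NR_STATS)
		return NULL;
	return &example_stats[idx];
}
#endif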
static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node;
	int n = *pos;
	int i;

	/* Prevent a tracer switch or rbtree modification while we iterate */
	mutex_lock(&session->stat_mutex);

	/* If we are at the beginning of the file, print the headers */
	if (session->ts->stat_headers) {
		if (n == 0)
			return SEQ_START_TOKEN;
		n--;
	}

	node = rb_first(&session->stat_root);
	for (i = 0; node && i < n; i++)
		node = rb_next(node);

	return node;
}
static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct stat_session *session = s->private;
	struct rb_node *node = p;

	(*pos)++;

	if (p == SEQ_START_TOKEN)
		return rb_first(&session->stat_root);

	return rb_next(node);
}

static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct stat_session *session = s->private;

	mutex_unlock(&session->stat_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct stat_session *session = s->private;
	struct stat_node *l = container_of(v, struct stat_node, node);

	if (v == SEQ_START_TOKEN)
		return session->ts->stat_headers(s);

	return session->ts->stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};
/* The session stat is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct stat_session *session = inode->i_private;

	ret = stat_seq_init(session);
	if (ret)
		return ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret) {
		reset_stat_session(session);
		return ret;
	}

	m = file->private_data;
	m->private = session;
	return ret;
}

/*
 * Avoid consuming memory with our now useless rbtree.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct stat_session *session = i->i_private;

	reset_stat_session(session);

	return seq_release(i, f);
}

static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};
static int tracing_stat_init(void)
{
	struct dentry *d_tracing;

	d_tracing = tracing_init_dentry();
	if (IS_ERR(d_tracing))
		return 0;

	stat_dir = tracefs_create_dir("trace_stat", d_tracing);
	if (!stat_dir)
		pr_warn("Could not create tracefs 'trace_stat' entry\n");

	return 0;
}

static int init_stat_file(struct stat_session *session)
{
	if (!stat_dir && tracing_stat_init())
		return -ENODEV;

	session->file = tracefs_create_file(session->ts->name, 0644,
					    stat_dir,
					    session, &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}
int register_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *session, *node;
	int ret;

	if (!trace)
		return -EINVAL;

	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	/* Already registered? */
	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry(node, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			mutex_unlock(&all_stat_sessions_mutex);
			return -EINVAL;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);

	/* Init the session */
	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	mutex_init(&session->stat_mutex);

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		return ret;
	}

	/* Register */
	mutex_lock(&all_stat_sessions_mutex);
	list_add_tail(&session->session_list, &all_stat_sessions);
	mutex_unlock(&all_stat_sessions_mutex);

	return 0;
}
void unregister_stat_tracer(struct tracer_stat *trace)
{
	struct stat_session *node, *tmp;

	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			list_del(&node->session_list);
			destroy_session(node);
			break;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);
}
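
/*
 * Illustrative sketch, not part of the original file: putting the
 * pieces together, a tracer fills a struct tracer_stat with the
 * callbacks sketched above and registers it.  On success the stats
 * become readable as a file named after ->name under the trace_stat
 * directory of the tracefs mount (typically
 * /sys/kernel/tracing/trace_stat/example here).  All example_* names
 * are hypothetical.
 */
#if 0
static int example_stat_headers(struct seq_file *s)
{
	seq_puts(s, "  hits\n");
	return 0;
}

static int example_stat_show(struct seq_file *s, void *p)
{
	struct example_stat *entry = p;

	seq_printf(s, "%6lu\n", entry->hits);
	return 0;
}

static struct tracer_stat example_trace_stat = {
	.name		= "example",
	.stat_start	= example_stat_start,
	.stat_next	= example_stat_next,
	.stat_cmp	= example_stat_cmp,
	.stat_show	= example_stat_show,
	.stat_headers	= example_stat_headers,
};

static int example_stat_register(void)
{
	return register_stat_tracer(&example_trace_stat);
}

static void example_stat_unregister(void)
{
	unregister_stat_tracer(&example_trace_stat);
}
#endif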