trace_functions.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};
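
/*
 * Allocate a per-instance ftrace_ops for @tr and point it at the plain
 * function callback.  The ops and the trace_array reference each other,
 * so the callback can find its instance through op->private.
 */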
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
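
/* ->start callback: wipe the per-CPU buffers so the trace starts clean. */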
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
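
/*
 * Main ftrace callback for the "function" tracer: record one function
 * entry (ip and its caller parent_ip) into the ring buffer.  Recursion
 * protection and the per-CPU "disabled" check guard against re-entry
 * from the tracing code itself.
 */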
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
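
/*
 * Variant used when the func_stack_trace option is set: record the
 * function entry and then a stack trace.  Interrupts are disabled and
 * the per-CPU "disabled" counter is used for recursion protection, as
 * the comment below explains.
 */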
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
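
/*
 * function_enabled is cleared before registering the ops and only set
 * once registration is done, so the callbacks above do not record
 * events while the ops is being hooked in or torn down.
 */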
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;
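
/*
 * Handle toggling of the func_stack_trace option.  If the function
 * tracer is the current tracer, the ops must be unregistered, its
 * callback switched between the plain and stack-trace variants, and
 * then re-registered; otherwise the new flag value simply takes effect
 * on the next init.
 */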
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
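
/*
 * Everything below implements the function probe commands (traceon,
 * traceoff, stacktrace, dump, cpudump) that can be attached to
 * individual functions; they are only available with dynamic ftrace.
 */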
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;
	} while (new_count != old_count);
}
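
/*
 * Decrement the per-ip count if one exists.  Returns 0 once the count
 * is exhausted, 1 otherwise (including the unlimited case where no
 * count was given).
 */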
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
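
/*
 * Common ->print helper for the probe ops below: prints
 * "function:command" followed by either the remaining count or
 * ":unlimited" when the probe was registered without a count.
 */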
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
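
/*
 * ->init callback for the counted probes: allocate the ip-to-count
 * mapper on first use and record the initial count for this ip.
 */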
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};
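
/*
 * Common parsing for the probe commands written to set_ftrace_filter,
 * e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * A leading '!' removes the probe, and an optional ":count" limits how
 * many times it fires; the count is stored in the callback data pointer.
 */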
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
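
/*
 * The command entries below make "traceon", "traceoff", "stacktrace",
 * "dump" and "cpudump" available as commands in set_ftrace_filter.
 */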
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
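
/* Register the probe commands (with dynamic ftrace) and the tracer itself. */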
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}