/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>

#include "insn.h"
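
/*
 * With the __gnu_mcount_nc calling convention the compiler emits a
 * "push {lr}" immediately before the call to the mcount stub, so the
 * instruction used to disable a call site must undo that push: the
 * "NOP" here is really a "pop {lr}".
 */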
#ifdef CONFIG_THUMB2_KERNEL
#define NOP             0xf85deb04      /* pop.w {lr} */
#else
#define NOP             0xe8bd4000      /* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)

#define OLD_NOP         0xe1a00000      /* mov r0, r0 */
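
/*
 * CONFIG_OLD_MCOUNT keeps support for call sites generated against the
 * original mcount ABI.  rec->arch.old_mcount marks such sites;
 * adjust_address() redirects the generic MCOUNT_ADDR/FTRACE_ADDR to the
 * old-ABI entry points and ftrace_nop_replace() picks the matching NOP.
 */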
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        if (!rec->arch.old_mcount)
                return addr;

        if (addr == MCOUNT_ADDR)
                addr = OLD_MCOUNT_ADDR;
        else if (addr == FTRACE_ADDR)
                addr = OLD_FTRACE_ADDR;

        return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        return addr;
}
#endif
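
/*
 * Build the branch-with-link instruction ("bl <addr>") that will live at
 * the patched call site.
 */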
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
        return arm_gen_branch_link(pc, addr);
}
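
/*
 * Replace the instruction at @pc with @new.  When @validate is set, the
 * current instruction is first read back and must match @old, so that an
 * unexpected opcode is never overwritten.  The instruction cache is
 * flushed after the write.
 */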
static int ftrace_modify_code(unsigned long pc, unsigned long old,
                              unsigned long new, bool validate)
{
        unsigned long replaced;

        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
                old = __opcode_to_mem_thumb32(old);
                new = __opcode_to_mem_thumb32(new);
        } else {
                old = __opcode_to_mem_arm(old);
                new = __opcode_to_mem_arm(new);
        }

        if (validate) {
                if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }

        if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

        return 0;
}
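
/*
 * Redirect the ftrace_call site inside the ftrace_caller trampoline to the
 * new tracer function.  validate is false: whatever call is currently at
 * the site is simply overwritten.
 */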
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc;
        unsigned long new;
        int ret;

        pc = (unsigned long)&ftrace_call;
        new = ftrace_call_replace(pc, (unsigned long)func);

        ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret) {
                pc = (unsigned long)&ftrace_call_old;
                new = ftrace_call_replace(pc, (unsigned long)func);

                ret = ftrace_modify_code(pc, 0, new, false);
        }
#endif

        return ret;
}
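
/*
 * Enable tracing of one call site: replace the NOP left by
 * ftrace_make_nop() with a call to @addr.
 */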
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long new, old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace(rec);
        new = ftrace_call_replace(ip, adjust_address(rec, addr));

        return ftrace_modify_code(rec->ip, old, new, true);
}
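
/*
 * Disable tracing of one call site: replace the recorded call with the
 * appropriate NOP.  If validation fails against the new-style call,
 * retry on the assumption that the site uses the old mcount ABI.
 */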
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned long old;
        unsigned long new;
        int ret;

        old = ftrace_call_replace(ip, adjust_address(rec, addr));
        new = ftrace_nop_replace(rec);
        ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
        if (ret == -EINVAL && addr == MCOUNT_ADDR) {
                rec->arch.old_mcount = true;

                old = ftrace_call_replace(ip, adjust_address(rec, addr));
                new = ftrace_nop_replace(rec);
                ret = ftrace_modify_code(ip, old, new, true);
        }
#endif

        return ret;
}
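
/*
 * Nothing architecture-specific to set up on ARM; write 0 (success) into
 * the word provided by the core ftrace code.
 */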
int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
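/*
 * Called from the mcount stub when the graph tracer is active.  The return
 * address in the parent frame is replaced with return_to_handler, and the
 * original one is pushed on the per-task return stack so it can be
 * restored when the traced function returns.
 */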
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long) &return_to_handler;
        struct ftrace_graph_ent trace;
        unsigned long old;
        int err;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;
        *parent = return_hooker;

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
                                       frame_pointer);
        if (err == -EBUSY) {
                *parent = old;
                return;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
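
/*
 * Patch a single site inside the ftrace trampoline, flipping it between a
 * plain NOP ("mov r0, r0") and a branch to @func.
 */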
static int __ftrace_modify_caller(unsigned long *callsite,
                                  void (*func) (void), bool enable)
{
        unsigned long caller_fn = (unsigned long) func;
        unsigned long pc = (unsigned long) callsite;
        unsigned long branch = arm_gen_branch(pc, caller_fn);
        unsigned long nop = 0xe1a00000; /* mov r0, r0 */
        unsigned long old = enable ? nop : branch;
        unsigned long new = enable ? branch : nop;

        return ftrace_modify_code(pc, old, new, true);
}
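
/*
 * Turn the ftrace_graph_call hook in the trampoline on or off, covering
 * the old-ABI trampoline as well when CONFIG_OLD_MCOUNT is set.
 */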
static int ftrace_modify_graph_caller(bool enable)
{
        int ret;

        ret = __ftrace_modify_caller(&ftrace_graph_call,
                                     ftrace_graph_caller,
                                     enable);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret)
                ret = __ftrace_modify_caller(&ftrace_graph_call_old,
                                             ftrace_graph_caller_old,
                                             enable);
#endif

        return ret;
}
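
/* Entry points used by the core graph tracer to switch the hook. */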
int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */