ftrace-entry.S
/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE
/* Simple stub so we can boot the kernel until runtime patching has
 * disabled all calls to this.  Then it'll be unused.
 *
 * With CONFIG_DYNAMIC_FTRACE, every compiled-in "call __mcount" site is
 * rewritten (to nops or to _ftrace_caller) by the ftrace patching code,
 * so this body only runs during early boot.
 */
ENTRY(__mcount)
# if ANOMALY_05000371
	/* NOTE(review): NOP padding for silicon anomaly 05000371 —
	 * presumably to guarantee the instruction spacing the anomaly
	 * requires around the return (see the 05000371 comment near
	 * _return_to_handler in this file); confirm against the ADI
	 * anomaly list for the target silicon revision.
	 */
	nop; nop; nop; nop;
# endif
	rts;
ENDPROC(__mcount)
/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
ENTRY(_ftrace_caller)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save first/second/third function arg and the return register */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 * ip: this point was called by ...
	 * parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];		/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* Runtime-patched call site: the exported _ftrace_call label lets
	 * the dynamic-ftrace code rewrite this call's target from
	 * _ftrace_stub to the active tracer.
	 */
	.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Second patch site: this nop is rewritten to the jump shown in
	 * the comment when the graph tracer is enabled.
	 */
	.globl _ftrace_graph_call
_ftrace_graph_call:
	nop;	/* jump _ftrace_graph_caller; */
# endif

	/* restore state and get out of dodge
	 * (.Lfinish_trace is also the return path used by
	 * _ftrace_graph_caller below)
	 */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

	.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)
#else
/* See documentation for _ftrace_caller */
ENTRY(__mcount)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;		/* r2 still holds &_ftrace_stub from above */
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, ...
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

	/* nothing to trace: restore r2 and return to the profiled function */
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register
	 * (r2 is already on the stack, pushed above)
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* setup the tracer function */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 * ip: this point was called by ...
	 * parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];		/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge
	 * (.Lfinish_trace is also jumped to by _ftrace_graph_caller,
	 * which relies on this exact 4-register stack layout)
	 */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

	.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register
	 * (__mcount pushed r2 before jumping here, giving the same
	 * 4-register frame that .Lfinish_trace pops)
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp;	/* unsigned long *parent */
	r1 = rets;	/* unsigned long self_addr */
# else
	/* dynamic path: _ftrace_caller already saved r2/r0/r1/rets,
	 * so rets sits at the top of the stack
	 */
	r0 = sp;	/* unsigned long *parent */
	r1 = [sp];	/* unsigned long self_addr */
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp;	/* unsigned long frame_pointer */
# endif
	r0 += 16;	/* skip the 4 local regs on stack -> &(saved parent RETS) */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	/* unwind via the common restore path in the mcount/ftrace_caller body */
	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)
/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* make sure original return values are saved
	 * (the profiled function's return lives in r0/r1/p0 variants,
	 * and we must hand them back untouched)
	 */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp;	/* Blackfin is sane, so omit this */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return
	 * (the three pops below double as that required spacing)
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif