/* arch/blackfin/kernel/ftrace.c */
  1. /*
  2. * ftrace graph code
  3. *
  4. * Copyright (C) 2009-2010 Analog Devices Inc.
  5. * Licensed under the GPL-2 or later.
  6. */
  7. #include <linux/ftrace.h>
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/atomic.h>
  12. #include <asm/cacheflush.h>
  13. #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Two 32-bit MNOP instructions (8 bytes total) — the patch template
 * used to blank out a disabled mcount call site.
 */
static const unsigned char mnop[] = {
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
  18. static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
  19. unsigned long dst)
  20. {
  21. uint32_t pcrel = (dst - src) >> 1;
  22. insn[0] = pcrel >> 16;
  23. insn[1] = 0xe3;
  24. insn[2] = pcrel;
  25. insn[3] = pcrel >> 8;
  26. }
  27. #define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
  28. static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
  29. unsigned long len)
  30. {
  31. int ret = probe_kernel_write((void *)ip, (void *)code, len);
  32. flush_icache_range(ip, ip + len);
  33. return ret;
  34. }
  35. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  36. unsigned long addr)
  37. {
  38. /* Turn the mcount call site into two MNOPs as those are 32bit insns */
  39. return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
  40. }
  41. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  42. {
  43. /* Restore the mcount call site */
  44. unsigned char call[8];
  45. call[0] = 0x67; /* [--SP] = RETS; */
  46. call[1] = 0x01;
  47. bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
  48. call[6] = 0x27; /* RETS = [SP++]; */
  49. call[7] = 0x01;
  50. return ftrace_modify_code(rec->ip, call, sizeof(call));
  51. }
  52. int ftrace_update_ftrace_func(ftrace_func_t func)
  53. {
  54. unsigned char call[4];
  55. unsigned long ip = (unsigned long)&ftrace_call;
  56. bfin_make_pcrel24(call, ip, func);
  57. return ftrace_modify_code(ip, call, sizeof(call));
  58. }
  59. int __init ftrace_dyn_arch_init(void *data)
  60. {
  61. /* return value is done indirectly via data */
  62. *(unsigned long *)data = 0;
  63. return 0;
  64. }
  65. #endif
  66. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  67. # ifdef CONFIG_DYNAMIC_FTRACE
  68. extern void ftrace_graph_call(void);
  69. int ftrace_enable_ftrace_graph_caller(void)
  70. {
  71. unsigned long ip = (unsigned long)&ftrace_graph_call;
  72. uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
  73. jump_pcrel12 |= 0x2000;
  74. return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
  75. }
  76. int ftrace_disable_ftrace_graph_caller(void)
  77. {
  78. return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
  79. }
  80. # endif
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * Called from the graph-tracer entry stub: @parent points at the saved
 * RETS slot, @self_addr is the traced function's address, and
 * @frame_pointer is passed through to the return-trace bookkeeping.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* Graph tracing is paused for this task — leave RETS alone */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* Save the real return address first; -EBUSY means the stack is full */
	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY)
		return;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry filtered out: undo the push done above */
		current->curr_ret_stack--;
		return;
	}

	/* all is well in the world ! hijack RETS ... */
	*parent = return_hooker;
}
  104. #endif