perf_callchain.c

/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
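
/*
 * Note: the structure is packed so that it maps byte-for-byte onto
 * the three consecutive words ({fp, sp, lr}) sitting immediately
 * below where the frame pointer points. It is filled by a raw copy
 * from the user stack, so no compiler padding is allowed.
 */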

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
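
/*
 * Worked example with made-up addresses: if the sampled frame has
 * fp == 0x7eff00, its tail sits at 0x7efef4 (fp minus the 12-byte
 * struct). buftail.lr is recorded as the caller's address, and
 * buftail.fp must point strictly above 0x7eff00 for the walk to
 * continue; anything else (or a faulting copy) ends the walk, which
 * bounds it against corrupt or hostile user stacks.
 */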

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->ARM_pc);

	if (!current->mm)
		return;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < entry->max_stack) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
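
/*
 * The walk above terminates on any of: the entry buffer filling up,
 * user_backtrace() returning NULL (unreadable tail, faulting copy, or
 * a non-increasing fp), or a frame pointer that is not 4-byte
 * aligned, which a compiler-built APCS frame chain should never
 * produce.
 */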

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry_ctx *entry = data;

	perf_callchain_store(entry, fr->pc);
	return 0;
}
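
/*
 * Returning zero tells walk_stackframe() to keep unwinding; a
 * non-zero return would stop the walk early. This callback always
 * returns zero, so the kernel walk runs until the unwinder itself
 * gives up.
 */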

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	arm_get_current_stackframe(regs, &fr);
	walk_stackframe(&fr, callchain_trace, entry);
}
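
/*
 * Unlike the user-space walk, the kernel walk reuses the arch
 * unwinder: arm_get_current_stackframe() seeds the frame record from
 * the interrupted registers and walk_stackframe() then calls
 * callchain_trace() once per frame.
 */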

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}
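
/*
 * These two helpers are the hooks the core perf code picks up
 * (via the perf_instruction_pointer/perf_misc_flags macro overrides
 * in asm/perf_event.h, assuming the usual ARM arrangement) to
 * attribute a sample's ip and header.misc to guest/host and
 * user/kernel context.
 */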