stacktrace.c

/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
                              bool nosched)
{
        if (nosched && in_sched_functions(addr))
                return 0;
        if (trace->skip > 0) {
                trace->skip--;
                return 0;
        }
        if (trace->nr_entries >= trace->max_entries)
                return -1;      /* buffer full: tell the caller to stop walking */

        trace->entries[trace->nr_entries++] = addr;
        return 0;
}
static void __save_stack_trace(struct stack_trace *trace,
                               struct task_struct *task, struct pt_regs *regs,
                               bool nosched)
{
        struct unwind_state state;
        unsigned long addr;

        /* If register state was supplied, record the interrupted ip first. */
        if (regs)
                save_stack_address(trace, regs->ip, nosched);

        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || save_stack_address(trace, addr, nosched))
                        break;
        }

        /* Terminate the trace with ULONG_MAX if there is room left. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
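
/*
 * Usage sketch (not part of the original file): a hypothetical caller fills
 * a struct stack_trace with its own entry buffer and records the current
 * task's kernel backtrace.  The "example_" names and the buffer size are
 * illustrative assumptions, not kernel API.
 */
static __maybe_unused void example_record_current_trace(void)
{
        static unsigned long example_entries[32];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(example_entries),
                .entries        = example_entries,
                .skip           = 0,    /* set >0 to drop that many leading entries */
        };

        save_stack_trace(&trace);
        /* example_entries[0..trace.nr_entries-1] now hold return addresses. */
}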

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        __save_stack_trace(trace, current, regs, false);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        /* Pin the task's stack so it cannot be freed while we walk it. */
        if (!try_get_task_stack(tsk))
                return;

        __save_stack_trace(trace, tsk, NULL, true);

        put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * Mirrors the layout of a frame set up by the standard function prologue:
 * the saved frame pointer, followed by the return address.
 */
struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
        int ret;

        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                return 0;

        ret = 1;
        /* Copy with page faults disabled; a fault fails the copy instead of sleeping. */
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
        const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = regs->ip;

        /* Walk the user frame-pointer chain until the buffer is full. */
        while (trace->nr_entries < trace->max_entries) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                /* Sanity check: the frame pointer must not point below the stack pointer. */
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
                        trace->entries[trace->nr_entries++] =
                                frame.ret_addr;
                }
                /* A self-referencing frame pointer would loop forever. */
                if (fp == frame.next_fp)
                        break;
                fp = frame.next_fp;
        }
}

void save_stack_trace_user(struct stack_trace *trace)
{
        /*
         * Trace user stack if we are not a kernel thread
         */
        if (current->mm) {
                __save_stack_trace_user(trace);
        }

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
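
/*
 * Usage sketch (not part of the original file): capturing the current task's
 * user-space backtrace, e.g. from a tracing hook running in process context.
 * The "example_" names and the buffer size are illustrative assumptions.
 */
static __maybe_unused void example_record_user_trace(void)
{
        static unsigned long example_user_entries[64];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(example_user_entries),
                .entries        = example_user_entries,
        };

        save_stack_trace_user(&trace);
        /*
         * The buffer now holds regs->ip followed by the return addresses found
         * by walking the frame-pointer chain, terminated by ULONG_MAX when
         * space remained.
         */
}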