dwarf-unwind.c

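/*
 * Test of DWARF-based user stack unwinding on a live machine: synthesize
 * mmap events for the current process, capture a stack/register sample in
 * a known call chain (test__dwarf_unwind -> krava_1 -> krava_2 -> krava_3
 * -> bsearch -> compare) and check that the unwinder reports exactly those
 * functions.
 */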
#include <linux/compiler.h>
#include <linux/types.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "thread.h"
#include "callchain.h"

#if defined (__x86_64__) || defined (__i386__) || defined (__powerpc__)
#include "arch-tests.h"
#endif

/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
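
/*
 * Feed synthesized MMAP2 events for the current process into the machine
 * so the unwinder can resolve our own mappings.
 */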
static int mmap_handler(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

static int init_live_machine(struct machine *machine)
{
	union perf_event event;
	pid_t pid = getpid();

	return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
						   mmap_handler, machine, true, 500);
}

#define MAX_STACK 8
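
/*
 * Per-frame callback for the unwinder: compare each resolved symbol with
 * the expected function name in funcs[], indexed according to the current
 * callchain order (caller-first or callee-first).
 */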
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	unsigned long *cnt = (unsigned long *) arg;
	char *symbol = entry->sym ? entry->sym->name : NULL;
	static const char *funcs[MAX_STACK] = {
		"test__arch_unwind_sample",
		"unwind_thread",
		"compare",
		"bsearch",
		"krava_3",
		"krava_2",
		"krava_1",
		"test__dwarf_unwind"
	};
	/*
	 * The funcs[MAX_STACK] array index, based on the
	 * callchain order setup.
	 */
	int idx = callchain_param.order == ORDER_CALLER ?
		  MAX_STACK - *cnt - 1 : *cnt;

	if (*cnt >= MAX_STACK) {
		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
		return -1;
	}

	if (!symbol) {
		pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
			 entry->ip);
		return -1;
	}

	(*cnt)++;
	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
		 symbol, entry->ip, funcs[idx]);
	return strcmp((const char *) symbol, funcs[idx]);
}
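
/*
 * Take an arch-specific stack/register sample of the current thread and
 * run the DWARF unwinder over it, counting and checking every entry.
 */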
__attribute__ ((noinline))
static int unwind_thread(struct thread *thread)
{
	struct perf_sample sample;
	unsigned long cnt = 0;
	int err = -1;

	memset(&sample, 0, sizeof(sample));

	if (test__arch_unwind_sample(&sample, thread)) {
		pr_debug("failed to get unwind sample\n");
		goto out;
	}

	err = unwind__get_entries(unwind_entry, &cnt, thread,
				  &sample, MAX_STACK);
	if (err)
		pr_debug("unwind failed\n");
	else if (cnt != MAX_STACK) {
		pr_debug("got wrong number of stack entries %lu != %d\n",
			 cnt, MAX_STACK);
		err = -1;
	}

 out:
	free(sample.user_stack.data);
	free(sample.user_regs.regs);
	return err;
}

static int global_unwind_retval = -INT_MAX;
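
/*
 * bsearch() comparison callback: since it is invoked from within libc,
 * the unwinder has to cross a shared object boundary. Run the unwind once
 * per callchain order and stash the result in global_unwind_retval.
 */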
__attribute__ ((noinline))
static int compare(void *p1, void *p2)
{
	/* Any possible value should be 'thread' */
	struct thread *thread = *(struct thread **)p1;

	if (global_unwind_retval == -INT_MAX) {
		/* Call unwinder twice for both callchain orders. */
		callchain_param.order = ORDER_CALLER;

		global_unwind_retval = unwind_thread(thread);
		if (!global_unwind_retval) {
			callchain_param.order = ORDER_CALLEE;
			global_unwind_retval = unwind_thread(thread);
		}
	}

	return p1 - p2;
}
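
/*
 * krava_1() -> krava_2() -> krava_3() put known frames on the stack;
 * krava_3() then enters libc through bsearch(), which calls compare().
 */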
__attribute__ ((noinline))
static int krava_3(struct thread *thread)
{
	struct thread *array[2] = {thread, thread};
	void *fp = &bsearch;
	/*
	 * make _bsearch a volatile function pointer to
	 * prevent potential optimization, which may expand
	 * bsearch and call compare directly from this function,
	 * instead of libc shared object.
	 */
	void *(*volatile _bsearch)(void *, void *, size_t,
				   size_t, int (*)(void *, void *));

	_bsearch = fp;
	_bsearch(array, &thread, 2, sizeof(struct thread **), compare);
	return global_unwind_retval;
}

__attribute__ ((noinline))
static int krava_2(struct thread *thread)
{
	return krava_3(thread);
}

__attribute__ ((noinline))
static int krava_1(struct thread *thread)
{
	return krava_2(thread);
}
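
/*
 * Test entry point: build a live host machine, select DWARF call chain
 * recording, resolve the current thread and kick off the known call chain
 * through krava_1().
 */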
int test__dwarf_unwind(int subtest __maybe_unused)
{
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_err("Failed to create kernel maps\n");
		return -1;
	}

	callchain_param.record_mode = CALLCHAIN_DWARF;

	if (init_live_machine(machine)) {
		pr_err("Could not init machine\n");
		goto out;
	}

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	thread = machine__find_thread(machine, getpid(), getpid());
	if (!thread) {
		pr_err("Could not get thread\n");
		goto out;
	}

	err = krava_1(thread);
	thread__put(thread);

 out:
	machine__delete_threads(machine);
	machine__delete(machine);
	return err;
}