/* bpf.c - perf self test for BPF filtering and prologue generation */
  1. #include <stdio.h>
  2. #include <sys/epoll.h>
  3. #include <util/util.h>
  4. #include <util/bpf-loader.h>
  5. #include <util/evlist.h>
  6. #include <linux/bpf.h>
  7. #include <linux/filter.h>
  8. #include <bpf/bpf.h>
  9. #include "tests.h"
  10. #include "llvm.h"
  11. #include "debug.h"
  12. #define NR_ITERS 111
  13. #ifdef HAVE_LIBBPF_SUPPORT
  14. static int epoll_wait_loop(void)
  15. {
  16. int i;
  17. /* Should fail NR_ITERS times */
  18. for (i = 0; i < NR_ITERS; i++)
  19. epoll_wait(-(i + 1), NULL, 0, 0);
  20. return 0;
  21. }
  22. #ifdef HAVE_BPF_PROLOGUE
  23. static int llseek_loop(void)
  24. {
  25. int fds[2], i;
  26. fds[0] = open("/dev/null", O_RDONLY);
  27. fds[1] = open("/dev/null", O_RDWR);
  28. if (fds[0] < 0 || fds[1] < 0)
  29. return -1;
  30. for (i = 0; i < NR_ITERS; i++) {
  31. lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
  32. lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
  33. }
  34. close(fds[0]);
  35. close(fds[1]);
  36. return 0;
  37. }
  38. #endif
  39. static struct {
  40. enum test_llvm__testcase prog_id;
  41. const char *desc;
  42. const char *name;
  43. const char *msg_compile_fail;
  44. const char *msg_load_fail;
  45. int (*target_func)(void);
  46. int expect_result;
  47. } bpf_testcase_table[] = {
  48. {
  49. LLVM_TESTCASE_BASE,
  50. "Test basic BPF filtering",
  51. "[basic_bpf_test]",
  52. "fix 'perf test LLVM' first",
  53. "load bpf object failed",
  54. &epoll_wait_loop,
  55. (NR_ITERS + 1) / 2,
  56. },
  57. #ifdef HAVE_BPF_PROLOGUE
  58. {
  59. LLVM_TESTCASE_BPF_PROLOGUE,
  60. "Test BPF prologue generation",
  61. "[bpf_prologue_test]",
  62. "fix kbuild first",
  63. "check your vmlinux setting?",
  64. &llseek_loop,
  65. (NR_ITERS + 1) / 4,
  66. },
  67. #endif
  68. {
  69. LLVM_TESTCASE_BPF_RELOCATION,
  70. "Test BPF relocation checker",
  71. "[bpf_relocation_test]",
  72. "fix 'perf test LLVM' first",
  73. "libbpf error when dealing with relocation",
  74. NULL,
  75. 0,
  76. },
  77. };
/*
 * Attach the events selected from the loaded BPF object @obj to the
 * current process, run @func to trigger them, then count the
 * PERF_RECORD_SAMPLE records produced: the BPF filter is supposed to let
 * exactly @expect samples through.  Returns TEST_OK on success,
 * TEST_FAIL otherwise.
 */
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq = 0,
		.mmap_pages = 256,
		.default_interval = 1,
	};
	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;
	struct parse_events_evlist parse_evlist;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_evlist, sizeof(parse_evlist));
	parse_evlist.error = &parse_error;
	INIT_LIST_HEAD(&parse_evlist.list);

	/* Turn the probes in the BPF object into perf events. */
	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
	if (err || list_empty(&parse_evlist.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	/* Restrict the events to this process only. */
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("No enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/* Move the parsed BPF events onto the evlist, then open and mmap. */
	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
	evlist->nr_groups = parse_evlist.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Run the workload with the events enabled around it. */
	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	/* Count how many samples survived the BPF filter. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count ++;
		}
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect\n");
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
  154. static struct bpf_object *
  155. prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
  156. {
  157. struct bpf_object *obj;
  158. obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
  159. if (IS_ERR(obj)) {
  160. pr_debug("Compile BPF program failed.\n");
  161. return NULL;
  162. }
  163. return obj;
  164. }
  165. static int __test__bpf(int idx)
  166. {
  167. int ret;
  168. void *obj_buf;
  169. size_t obj_buf_sz;
  170. struct bpf_object *obj;
  171. ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
  172. bpf_testcase_table[idx].prog_id,
  173. true, NULL);
  174. if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
  175. pr_debug("Unable to get BPF object, %s\n",
  176. bpf_testcase_table[idx].msg_compile_fail);
  177. if (idx == 0)
  178. return TEST_SKIP;
  179. else
  180. return TEST_FAIL;
  181. }
  182. obj = prepare_bpf(obj_buf, obj_buf_sz,
  183. bpf_testcase_table[idx].name);
  184. if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
  185. if (!obj)
  186. pr_debug("Fail to load BPF object: %s\n",
  187. bpf_testcase_table[idx].msg_load_fail);
  188. else
  189. pr_debug("Success unexpectedly: %s\n",
  190. bpf_testcase_table[idx].msg_load_fail);
  191. ret = TEST_FAIL;
  192. goto out;
  193. }
  194. if (obj)
  195. ret = do_test(obj,
  196. bpf_testcase_table[idx].target_func,
  197. bpf_testcase_table[idx].expect_result);
  198. out:
  199. bpf__clear();
  200. return ret;
  201. }
  202. int test__bpf_subtest_get_nr(void)
  203. {
  204. return (int)ARRAY_SIZE(bpf_testcase_table);
  205. }
  206. const char *test__bpf_subtest_get_desc(int i)
  207. {
  208. if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
  209. return NULL;
  210. return bpf_testcase_table[i].desc;
  211. }
  212. static int check_env(void)
  213. {
  214. int err;
  215. unsigned int kver_int;
  216. char license[] = "GPL";
  217. struct bpf_insn insns[] = {
  218. BPF_MOV64_IMM(BPF_REG_0, 1),
  219. BPF_EXIT_INSN(),
  220. };
  221. err = fetch_kernel_version(&kver_int, NULL, 0);
  222. if (err) {
  223. pr_debug("Unable to get kernel version\n");
  224. return err;
  225. }
  226. err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
  227. sizeof(insns) / sizeof(insns[0]),
  228. license, kver_int, NULL, 0);
  229. if (err < 0) {
  230. pr_err("Missing basic BPF support, skip this test: %s\n",
  231. strerror(errno));
  232. return err;
  233. }
  234. close(err);
  235. return 0;
  236. }
  237. int test__bpf(int i)
  238. {
  239. int err;
  240. if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
  241. return TEST_FAIL;
  242. if (geteuid() != 0) {
  243. pr_debug("Only root can run BPF test\n");
  244. return TEST_SKIP;
  245. }
  246. if (check_env())
  247. return TEST_SKIP;
  248. err = __test__bpf(i);
  249. return err;
  250. }
  251. #else
/* !HAVE_LIBBPF_SUPPORT stub: no BPF subtests are available. */
int test__bpf_subtest_get_nr(void)
{
	return 0;
}
/* !HAVE_LIBBPF_SUPPORT stub: no subtest descriptions. */
const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}
/* !HAVE_LIBBPF_SUPPORT stub: report the test as skipped. */
int test__bpf(int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}
  265. #endif