// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-prologue.c
 *
 * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <bpf/libbpf.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <errno.h>
#include <stdlib.h>		/* for atoi() in argtype_to_ldx_size() */
#include <dwarf-regs.h>
#include <linux/filter.h>

#define BPF_REG_SIZE 8

/*
 * Placeholder jump offsets: emitted into JMP instructions first, then
 * patched to real offsets by prologue_relocate() once the positions of
 * the error/success/user code blocks are known.
 */
#define JMP_TO_ERROR_CODE	-1
#define JMP_TO_SUCCESS_CODE	-2
#define JMP_TO_USER_CODE	-3

struct bpf_insn_pos {
        struct bpf_insn *begin;	/* start of the output buffer */
        struct bpf_insn *end;	/* one past the last usable slot */
        struct bpf_insn *pos;	/* next instruction to be written */
};

static inline int
pos_get_cnt(struct bpf_insn_pos *pos)
{
        return pos->pos - pos->begin;
}

static int
append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
{
        if (!pos->pos)
                return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

        if (pos->pos + 1 >= pos->end) {
                pr_err("bpf prologue: prologue too long\n");
                pos->pos = NULL;
                return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
        }

        *(pos->pos)++ = new_insn;
        return 0;
}

static int
check_pos(struct bpf_insn_pos *pos)
{
        if (!pos->pos || pos->pos >= pos->end)
                return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
        return 0;
}

/*
 * Convert a type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
 * Documentation/trace/kprobetrace.txt) to the size field of a
 * BPF_LDX_MEM instruction (BPF_{B,H,W,DW}).
 */
static int
argtype_to_ldx_size(const char *type)
{
        int arg_size = type ? atoi(&type[1]) : 64;

        switch (arg_size) {
        case 8:
                return BPF_B;
        case 16:
                return BPF_H;
        case 32:
                return BPF_W;
        case 64:
        default:
                return BPF_DW;
        }
}

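/*
 * For illustration (added comment, not part of the original file):
 * passing &type[1] skips the leading 'u'/'s', so only the bit width
 * matters:
 *
 *	argtype_to_ldx_size("u8")  == BPF_B
 *	argtype_to_ldx_size("s16") == BPF_H
 *	argtype_to_ldx_size("u32") == BPF_W
 *	argtype_to_ldx_size(NULL)  == BPF_DW	(default: full register)
 */
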
static const char *
insn_sz_to_str(int insn_sz)
{
        switch (insn_sz) {
        case BPF_B:
                return "BPF_B";
        case BPF_H:
                return "BPF_H";
        case BPF_W:
                return "BPF_W";
        case BPF_DW:
                return "BPF_DW";
        default:
                return "UNKNOWN";
        }
}

/* Give it a shorter name */
#define ins(i, p) append_insn((i), (p))

/*
 * Given a register name (in 'reg'), generate an instruction to
 * load that register into an eBPF register:
 * 'ldd target_reg, offset(ctx_reg)', where
 * ctx_reg has been pre-initialized to point to 'struct pt_regs'.
 */
static int
gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
                     const char *reg, int target_reg)
{
        int offset = regs_query_register_offset(reg);

        if (offset < 0) {
                pr_err("bpf: prologue: failed to get register %s\n",
                       reg);
                return offset;
        }
        ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);

        return check_pos(pos);
}

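/*
 * Illustration (added comment, not in the original): with
 * ctx_reg == r1 and target_reg == r3, the emitted BPF_LDX_MEM is
 * equivalent to:
 *
 *	r3 = *(u64 *)(r1 + offset)	// offset of 'reg' in struct pt_regs
 */
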
/*
 * Generate a BPF_FUNC_probe_read function call.
 *
 * src_base_addr_reg is a register holding the base address,
 * dst_addr_reg is a register holding the dest address (on stack),
 * the result is:
 *
 *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Arguments of BPF_FUNC_probe_read:
 *     ARG1: ptr to stack (dest)
 *     ARG2: size (8)
 *     ARG3: unsafe ptr (src)
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
             int src_base_addr_reg,
             int dst_addr_reg,
             long offset)
{
        /* mov arg3, src_base_addr_reg */
        if (src_base_addr_reg != BPF_REG_ARG3)
                ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
        /* add arg3, #offset */
        if (offset)
                ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

        /* mov arg2, #reg_size */
        ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

        /* mov arg1, dst_addr_reg */
        if (dst_addr_reg != BPF_REG_ARG1)
                ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

        /* Call probe_read */
        ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
        /*
         * Error processing: if the read fails, goto the error code,
         * which will be relocated. Target should be the start of
         * the error processing code.
         */
        ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
            pos);

        return check_pos(pos);
}

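/*
 * Sketch of the emitted sequence (illustrative, not in the original):
 * for gen_read_mem(pos, BPF_REG_3, BPF_REG_7, 16), r3 already is ARG3
 * so the first move is skipped, leaving roughly:
 *
 *	r3 += 16			// unsafe src = base + offset
 *	r2 = 8				// BPF_REG_SIZE bytes
 *	r1 = r7				// dest slot on stack
 *	call probe_read			// *(r1) = *(r3), 8 bytes
 *	if r0 != 0 goto <error_code>	// offset patched later by
 *					// prologue_relocate()
 */
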
/*
 * Each arg should be a bare register. Fetch and save them into argument
 * registers (r3 - r5).
 *
 * BPF_REG_1 should have been initialized with a pointer to
 * 'struct pt_regs'.
 */
static int
gen_prologue_fastpath(struct bpf_insn_pos *pos,
                      struct probe_trace_arg *args, int nargs)
{
        int i, err = 0;

        for (i = 0; i < nargs; i++) {
                err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
                                           BPF_PROLOGUE_START_ARG_REG + i);
                if (err)
                        goto errout;
        }

        return check_pos(pos);
errout:
        return err;
}

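/*
 * Hypothetical example: probing with a single bare-register arg such as
 * '%di' on x86_64 (assuming regs_query_register_offset() resolves it to
 * the 'di' slot of struct pt_regs), the fast path emits just:
 *
 *	r3 = *(u64 *)(r1 + offsetof(struct pt_regs, di))
 *
 * where r3 is BPF_PROLOGUE_START_ARG_REG for the first argument.
 */
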
/*
 * Slow path:
 * At least one argument has the form of 'offset($rx)'.
 *
 * The following code first stores them onto the stack, then loads all
 * of them into r2 - r5.
 * Before the final loading, the result should be:
 *
 * low address
 * BPF_REG_FP - 24  ARG3
 * BPF_REG_FP - 16  ARG2
 * BPF_REG_FP - 8   ARG1
 * BPF_REG_FP
 * high address
 *
 * For each argument (described as: offn(...off2(off1(reg)))),
 * the following code is generated:
 *
 * r7 <- fp
 * r7 <- r7 - stack_offset	// Ideally the code would initialize r7
 *				// using fp once, before generating all
 *				// args. However, the eBPF verifier only
 *				// regards r7 as a stack pointer if it is
 *				// derived directly from fp, not from
 *				// another stack pointer minus an offset.
 *				// This is why r7 has to be reset from fp
 *				// for each variable.
 * r3 <- value of 'reg'	-> generated using gen_ldx_reg_from_ctx()
 * (r7) <- r3		// skip the following instructions for a bare reg
 * r3 <- r3 + off1	. // skip if off1 == 0
 * r2 <- 8		 \
 * r1 <- r7		  |-> generated by gen_read_mem()
 * call probe_read	 /
 * jnei r0, 0, err	./
 * r3 <- (r7)
 * r3 <- r3 + off2	. // skip if off2 == 0
 * r2 <- 8		 \ // r2 may be clobbered by probe_read, so set it again
 * r1 <- r7		  |-> generated by gen_read_mem()
 * call probe_read	 /
 * jnei r0, 0, err	./
 * ...
 */
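/*
 * Illustrative expansion (not in the original file): one argument
 * '+16(%di)' at slot 0 (stack_offset == -8) would be fetched as:
 *
 *	r3 = *(u64 *)(r6 + <offset of di>)	// base reg; ctx saved in r6
 *	r7 = fp
 *	r7 += -8
 *	*(u64 *)(fp - 8) = r3			// seed the stack slot
 *	r3 += 16; r2 = 8; r1 = r7		// gen_read_mem()
 *	call probe_read				// fp[-8] = *(%di + 16)
 *	if r0 != 0 goto <error_code>
 */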
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
                      struct probe_trace_arg *args, int nargs)
{
        int err, i;

        for (i = 0; i < nargs; i++) {
                struct probe_trace_arg *arg = &args[i];
                const char *reg = arg->value;
                struct probe_trace_arg_ref *ref = NULL;
                int stack_offset = (i + 1) * -8;

                pr_debug("prologue: fetch arg %d, base reg is %s\n",
                         i, reg);

                /* value of base register is stored into ARG3 */
                err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
                                           BPF_REG_ARG3);
                if (err) {
                        pr_err("prologue: failed to get offset of register %s\n",
                               reg);
                        goto errout;
                }

                /* Make r7 the stack pointer. */
                ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
                /* r7 += -8 */
                ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
                /*
                 * Store r3 (base register) onto stack.
                 * Ensure fp[offset] is set.
                 * fp is the only valid base register when storing
                 * into the stack. We are not allowed to use r7 as
                 * the base register here.
                 */
                ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                                stack_offset), pos);

                ref = arg->ref;
                while (ref) {
                        pr_debug("prologue: arg %d: offset %ld\n",
                                 i, ref->offset);
                        err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
                                           ref->offset);
                        if (err) {
                                pr_err("prologue: failed to generate probe_read function call\n");
                                goto errout;
                        }

                        ref = ref->next;
                        /*
                         * Load previous result into ARG3. Use
                         * BPF_REG_FP instead of r7 because the
                         * verifier allows FP-based addressing only.
                         */
                        if (ref)
                                ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
                                                BPF_REG_FP, stack_offset), pos);
                }
        }

        /* Final pass: read to registers */
        for (i = 0; i < nargs; i++) {
                int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;

                pr_debug("prologue: load arg %d, insn_sz is %s\n",
                         i, insn_sz_to_str(insn_sz));
                ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
                                BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
        }

        ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

        return check_pos(pos);
errout:
        return err;
}

static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
                  struct bpf_insn *success_code, struct bpf_insn *user_code)
{
        struct bpf_insn *insn;

        if (check_pos(pos))
                return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

        for (insn = pos->begin; insn < pos->pos; insn++) {
                struct bpf_insn *target;
                u8 class = BPF_CLASS(insn->code);
                u8 opcode;

                if (class != BPF_JMP)
                        continue;
                opcode = BPF_OP(insn->code);
                if (opcode == BPF_CALL)
                        continue;

                switch (insn->off) {
                case JMP_TO_ERROR_CODE:
                        target = error_code;
                        break;
                case JMP_TO_SUCCESS_CODE:
                        target = success_code;
                        break;
                case JMP_TO_USER_CODE:
                        target = user_code;
                        break;
                default:
                        pr_err("bpf prologue: internal error: relocation failed\n");
                        return -BPF_LOADER_ERRNO__PROLOGUE;
                }

                insn->off = target - (insn + 1);
        }
        return 0;
}

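/*
 * Worked example (illustrative): if a 'jne r0, 0, JMP_TO_ERROR_CODE'
 * sits at instruction index 10 and error_code starts at index 25, the
 * patched offset becomes 25 - (10 + 1) == 14, i.e. skip the next 14
 * instructions. BPF jump offsets are relative to the following insn.
 */
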
int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
                      struct bpf_insn *new_prog, size_t *new_cnt,
                      size_t cnt_space)
{
        struct bpf_insn *success_code = NULL;
        struct bpf_insn *error_code = NULL;
        struct bpf_insn *user_code = NULL;
        struct bpf_insn_pos pos;
        bool fastpath = true;
        int err = 0, i;

        if (!new_prog || !new_cnt)
                return -EINVAL;

        if (cnt_space > BPF_MAXINSNS)
                cnt_space = BPF_MAXINSNS;

        pos.begin = new_prog;
        pos.end = new_prog + cnt_space;
        pos.pos = new_prog;

        if (!nargs) {
                ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
                    &pos);

                if (check_pos(&pos))
                        goto errout;

                *new_cnt = pos_get_cnt(&pos);
                return 0;
        }

        if (nargs > BPF_PROLOGUE_MAX_ARGS) {
                pr_warning("bpf: prologue: %d arguments are dropped\n",
                           nargs - BPF_PROLOGUE_MAX_ARGS);
                nargs = BPF_PROLOGUE_MAX_ARGS;
        }

        /* First pass: validation */
        for (i = 0; i < nargs; i++) {
                struct probe_trace_arg_ref *ref = args[i].ref;

                if (args[i].value[0] == '@') {
                        /* TODO: fetch global variable */
                        pr_err("bpf: prologue: global %s%+ld not supported\n",
                               args[i].value, ref ? ref->offset : 0);
                        return -ENOTSUP;
                }

                while (ref) {
                        /* fastpath is true only if all args have ref == NULL */
                        fastpath = false;

                        /*
                         * The instruction encodes the immediate value
                         * using s32, but ref->offset is long. On systems
                         * where long doesn't fit in s32, refuse to
                         * process if ref->offset is too large (or small).
                         */
#ifdef __LP64__
#define OFFSET_MAX	((1LL << 31) - 1)
#define OFFSET_MIN	((1LL << 31) * -1)
                        if (ref->offset > OFFSET_MAX ||
                            ref->offset < OFFSET_MIN) {
                                pr_err("bpf: prologue: offset out of bound: %ld\n",
                                       ref->offset);
                                return -BPF_LOADER_ERRNO__PROLOGUEOOB;
                        }
#endif
                        ref = ref->next;
                }
        }
        pr_debug("prologue: pass validation\n");

        if (fastpath) {
                /* If all variables are registers... */
                pr_debug("prologue: fast path\n");
                err = gen_prologue_fastpath(&pos, args, nargs);
                if (err)
                        goto errout;
        } else {
                pr_debug("prologue: slow path\n");

                /* Initialization: move ctx to a callee saved register. */
                ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);

                err = gen_prologue_slowpath(&pos, args, nargs);
                if (err)
                        goto errout;
                /*
                 * start of ERROR_CODE (only the slow path needs error code)
                 *	mov r2 <- 1  // r2 is the error number
                 *	mov r3 <- 0  // r3, r4... should be touched or the
                 *		     // verifier would complain
                 *	mov r4 <- 0
                 *	...
                 *	goto usercode
                 */
                error_code = pos.pos;
                ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
                    &pos);

                for (i = 0; i < nargs; i++)
                        ins(BPF_ALU64_IMM(BPF_MOV,
                                          BPF_PROLOGUE_START_ARG_REG + i,
                                          0),
                            &pos);
                ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
                    &pos);
        }

        /*
         * start of SUCCESS_CODE:
         *	mov r2 <- 0
         *	goto usercode  // skip
         */
        success_code = pos.pos;
        ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);

        /*
         * start of USER_CODE:
         *	Restore ctx to r1
         */
        user_code = pos.pos;
        if (!fastpath) {
                /*
                 * Only the slow path needs to restore ctx. In the fast
                 * path, registers are loaded directly from r1.
                 */
                ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
                err = prologue_relocate(&pos, error_code, success_code,
                                        user_code);
                if (err)
                        goto errout;
        }

        err = check_pos(&pos);
        if (err)
                goto errout;

        *new_cnt = pos_get_cnt(&pos);
        return 0;
errout:
        return err;
}
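
/*
 * Minimal usage sketch (hypothetical caller; the real one lives in
 * bpf-loader.c). 'orig_insns'/'orig_cnt' stand in for the eBPF program
 * the prologue is prepended to:
 *
 *	struct bpf_insn buf[BPF_MAXINSNS];
 *	size_t prologue_cnt = 0;
 *
 *	int err = bpf__gen_prologue(tev->args, tev->nargs,
 *				    buf, &prologue_cnt,
 *				    BPF_MAXINSNS - orig_cnt);
 *	if (!err)
 *		memcpy(&buf[prologue_cnt], orig_insns,
 *		       sizeof(buf[0]) * orig_cnt);
 */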