libbpf.c

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
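
/*
 * Example (illustrative sketch, not part of libbpf): a caller can route
 * libbpf's messages into its own logging by passing printf-like callbacks
 * to libbpf_set_print(). The callback type libbpf_print_fn_t is assumed to
 * be declared in libbpf.h as a printf-style function returning int.
 *
 *	static int my_warn(const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = vfprintf(stderr, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 *
 *	libbpf_set_print(my_warn, my_warn, NULL);
 *
 * Passing NULL for the debug callback silences debug output, since __pr()
 * above skips NULL callbacks.
 */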
#define STRERR_BUFSIZE	128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
};

int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}
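
/*
 * Example (illustrative sketch): translating a libbpf return value into a
 * human-readable message. Works for both plain negative errno values and
 * the libbpf-specific LIBBPF_ERRNO__* codes handled above.
 *
 *	char msg[STRERR_BUFSIZE];
 *	int err;
 *
 *	err = bpf_object__load(obj);	// obj from bpf_object__open()
 *	if (err) {
 *		libbpf_strerror(err, msg, sizeof(msg));
 *		fprintf(stderr, "load failed: %s\n", msg);
 *	}
 */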
#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *section_name;
	struct bpf_insn *insns;
	size_t insns_cnt;
	enum bpf_prog_type type;

	struct {
		int insn_idx;
		int map_idx;
	} *reloc_desc;
	int nr_reloc;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;
};

struct bpf_map {
	int fd;
	char *name;
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information needed while doing ELF-related work. Only valid
	 * if efile.fd is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
	} efile;
	/*
	 * All loaded bpf_object structures are linked in a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);
	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
static int
bpf_program__init(void *data, size_t size, char *name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog %s\n",
			   name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for %s\n", name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_KPROBE;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so no special handling is needed
		 * in bpf_object__close().
		 */
		pr_warning("failed to alloc a new program '%s'\n",
			   name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would need to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
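
/*
 * For reference (a sketch of typical BPF program source, not part of this
 * file): the "license" and "version" sections consumed above are usually
 * emitted by the BPF C program itself, with a SEC() macro commonly defined
 * in sample/helper headers as __attribute__((section(name), used)):
 *
 *	char _license[] SEC("license") = "GPL";
 *	u32 _version SEC("version") = LINUX_VERSION_CODE;
 *
 * The version value must match the running kernel when the program type
 * requires it; see bpf_object__validate() and the KVER handling in
 * load_program() below.
 */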
static int
bpf_object__init_maps(struct bpf_object *obj, void *data,
		      size_t size)
{
	size_t nr_maps;
	int i;

	nr_maps = size / sizeof(struct bpf_map_def);
	if (!data || !nr_maps) {
		pr_debug("%s doesn't need map definition\n",
			 obj->path);
		return 0;
	}

	pr_debug("maps in %s: %zd bytes\n", obj->path, size);

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;

		/*
		 * Fill all fds with -1 so we won't close an incorrect
		 * fd (fd 0 is stdin) on failure (zclose won't close a
		 * negative fd).
		 */
		obj->maps[i].fd = -1;

		/* Save map definition into obj->maps */
		*def = ((struct bpf_map_def *)data)[i];
	}
	return 0;
}
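
/*
 * For reference (illustrative sketch): the "maps" section parsed above is
 * expected to be an array of struct bpf_map_def, which libbpf.h of this
 * era declares with type, key_size, value_size and max_entries fields.
 * A BPF C program typically defines one as:
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type		= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u64),
 *		.max_entries	= 1024,
 *	};
 *
 * bpf_object__init_maps_name() below then uses the symbol table to match
 * each symbol in the "maps" section (here "my_map") to its slot in the
 * array by dividing st_value by sizeof(struct bpf_map_def).
 */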
static int
bpf_object__init_maps_name(struct bpf_object *obj)
{
	int i;
	Elf_Data *symbols = obj->efile.symbols;

	if (!symbols || obj->efile.maps_shndx < 0)
		return -EINVAL;

	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		size_t map_idx;
		const char *map_name;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= obj->nr_maps) {
			pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
				   map_name, map_idx, obj->nr_maps);
			continue;
		}
		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %zu is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
	}

	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section header from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section name from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section data from %s(%s)\n",
				   name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
			 name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0) {
			err = bpf_object__init_maps(obj, data->d_buf,
						    data->d_size);
			obj->efile.maps_shndx = idx;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	if (obj->efile.maps_shndx >= 0)
		err = bpf_object__init_maps_name(obj);
out:
	return err;
}
static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

static int
bpf_program__collect_reloc(struct bpf_program *prog,
			   size_t nr_maps, GElf_Shdr *shdr,
			   Elf_Data *data, Elf_Data *symbols,
			   int maps_shndx)
{
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sym.st_shndx != maps_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	unsigned int i;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		int *pfd = &obj->maps[i].fd;

		*pfd = bpf_create_map(def->type,
				      def->key_size,
				      def->value_size,
				      def->max_entries);
		if (*pfd < 0) {
			size_t j;
			int err = *pfd;

			pr_warning("failed to create map: %s\n",
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map: fd=%d\n", *pfd);
	}

	return 0;
}

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		int insn_idx, map_idx;
		struct bpf_insn *insns = prog->insns;

		insn_idx = prog->reloc_desc[i].insn_idx;
		map_idx = prog->reloc_desc[i].map_idx;

		if (insn_idx >= (int)prog->insns_cnt) {
			pr_warning("relocation out of range: '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
		insns[insn_idx].imm = obj->maps[map_idx].fd;
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
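
/*
 * Note on the relocation above (illustrative, not part of the original
 * source): each relocated instruction is the first half of an ldimm64
 * (BPF_LD | BPF_IMM | BPF_DW) that the compiler emitted for a map symbol.
 * Rewriting src_reg to BPF_PSEUDO_MAP_FD and imm to the map's fd produces
 * the same encoding a loader would build by hand, e.g.:
 *
 *	struct bpf_insn ld_map = {
 *		.code    = BPF_LD | BPF_DW | BPF_IMM,
 *		.dst_reg = BPF_REG_1,
 *		.src_reg = BPF_PSEUDO_MAP_FD,
 *		.imm     = map_fd,	// first half of the 64-bit immediate
 *	};
 *
 * so the verifier can resolve the referenced map at load time.
 */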
static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;
		size_t nr_maps = obj->nr_maps;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no %d section\n",
				   idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, nr_maps,
						 shdr, data,
						 obj->efile.symbols,
						 obj->efile.maps_shndx);
		if (err)
			return err;
	}
	return 0;
}
static int
load_program(enum bpf_prog_type type, struct bpf_insn *insns,
	     int insns_cnt, char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program(type, insns, insns_cnt, license,
			       kern_version, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%d insns), at most %d insns\n",
			   insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
					      insns_cnt, license, kern_version,
					      NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__validate(struct bpf_object *obj)
{
	if (obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0);
}

struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz);
}
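
/*
 * Example (illustrative sketch): opening an object from memory instead of
 * a file, e.g. an ELF image embedded in the loader binary. The buffer must
 * stay valid until __bpf_object__open() returns, since bpf_object__new()
 * does not duplicate it. The _binary_* symbols are hypothetical.
 *
 *	extern char _binary_prog_o_start[];
 *	extern char _binary_prog_o_end[];
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_buffer(_binary_prog_o_start,
 *				      _binary_prog_o_end - _binary_prog_o_start,
 *				      "embedded_prog");
 *	if (!obj || IS_ERR(obj))
 *		return -1;
 */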
int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
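
/*
 * Example (illustrative sketch): the typical object lifecycle exposed by
 * this file is open -> load -> use fds -> close. "prog.o" is a placeholder
 * path; error strings come from libbpf_strerror() above.
 *
 *	struct bpf_object *obj;
 *	char msg[STRERR_BUFSIZE];
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	if (!obj || IS_ERR(obj))
 *		return -1;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		libbpf_strerror(err, msg, sizeof(msg));
 *		fprintf(stderr, "loading prog.o failed: %s\n", msg);
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 *
 *	// ... fetch program/map fds, attach to events ...
 *
 *	bpf_object__close(obj);	// also unloads maps and programs
 */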
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* An empty list is detected here, so no check is needed on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}
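
/*
 * Example (illustrative sketch): walking all programs in an object with
 * bpf_program__next() and fetching their titles and fds. Assumes the
 * object has already been loaded.
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_program__next(prog, obj)) != NULL) {
 *		const char *title = bpf_program__title(prog, false);
 *		int fd = bpf_program__fd(prog);
 *
 *		printf("section %s -> fd %d\n", title, fd);
 *	}
 */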
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}
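
/*
 * Example (illustrative sketch): using the pre-processor hook to load one
 * program body into several instances, each with a rewritten instruction
 * stream. The callback type and struct bpf_prog_prep_result are assumed to
 * be declared in libbpf.h; my_prep() is hypothetical and would typically
 * patch a per-instance constant into the copy it hands back via result.
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		// fill res->new_insn_ptr / res->new_insn_cnt for instance n,
 *		// or leave them zeroed to skip loading this instance
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 4, my_prep);	// before bpf_object__load()
 *	...
 *	int fd2 = bpf_program__nth_fd(prog, 2);		// after loading
 */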
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

static void bpf_program__set_type(struct bpf_program *prog,
				  enum bpf_prog_type type)
{
	prog->type = type;
}

int bpf_program__set_tracepoint(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
	return 0;
}

int bpf_program__set_kprobe(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
	return 0;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

bool bpf_program__is_tracepoint(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
}

bool bpf_program__is_kprobe(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
}
int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}
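
/*
 * Example (illustrative sketch): looking up a map created during
 * bpf_object__load() and iterating over all maps with the
 * bpf_map__for_each() macro from libbpf.h, which is built on
 * bpf_map__next() above. "my_map" is a placeholder name.
 *
 *	struct bpf_map *map;
 *	int map_fd;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (map) {
 *		map_fd = bpf_map__fd(map);
 *		// use map_fd with the BPF map syscall wrappers in bpf.h
 *	}
 *
 *	bpf_map__for_each(map, obj)
 *		printf("map %s: fd %d\n", bpf_map__name(map), bpf_map__fd(map));
 */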