test_verifier.c
  1. /*
  2. * Testsuite for eBPF verifier
  3. *
  4. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. */
  10. #include <stdio.h>
  11. #include <unistd.h>
  12. #include <linux/bpf.h>
  13. #include <errno.h>
  14. #include <linux/unistd.h>
  15. #include <string.h>
  16. #include <linux/filter.h>
  17. #include <stddef.h>
  18. #include <stdbool.h>
  19. #include <sys/resource.h>
  20. #include "libbpf.h"
/* Per-test limits: max instructions per test program and max fixup slots. */
#define MAX_INSNS 512
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define MAX_FIXUPS 8

/* One verifier test case: a short eBPF program together with the
 * expected load verdict (and expected verifier error substring) for
 * both privileged and unprivileged callers.
 */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program under test */
	/* Instruction indices to be patched with a map fd before loading.
	 * NOTE(review): the patching logic (and which map type each array
	 * selects) lives in driver code not visible in this chunk —
	 * confirm against do_test()/test runner.
	 */
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	int test_val_map_fixup[MAX_FIXUPS];
	const char *errstr;		/* expected verifier log substring (privileged load) */
	const char *errstr_unpriv;	/* expected substring for unprivileged load */
	enum {
		UNDEF,			/* zero value: field left unset in the designated initializer */
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;	/* 0 when unset — presumably the runner's default type; verify in driver */
};
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Map value layout used by tests patched via test_val_map_fixup:
 * an index field followed by a fixed-size array, suitable for
 * exercising verifier bounds checks on map value accesses.
 */
struct test_val {
	unsigned index;
	int foo[MAX_ENTRIES];	/* 11 ints + the unsigned above = 48 bytes, 64-bit aligned end */
};
/* Alternate map value layout: 32 u32 slots.
 * NOTE(review): no test in this chunk references it — usage is
 * presumably in a later part of the file; confirm there.
 */
struct other_val {
	unsigned int action[32];
};
  50. static struct bpf_test tests[] = {
  51. {
  52. "add+sub+mul",
  53. .insns = {
  54. BPF_MOV64_IMM(BPF_REG_1, 1),
  55. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  56. BPF_MOV64_IMM(BPF_REG_2, 3),
  57. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  58. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  59. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  60. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  61. BPF_EXIT_INSN(),
  62. },
  63. .result = ACCEPT,
  64. },
  65. {
  66. "unreachable",
  67. .insns = {
  68. BPF_EXIT_INSN(),
  69. BPF_EXIT_INSN(),
  70. },
  71. .errstr = "unreachable",
  72. .result = REJECT,
  73. },
  74. {
  75. "unreachable2",
  76. .insns = {
  77. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  78. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  79. BPF_EXIT_INSN(),
  80. },
  81. .errstr = "unreachable",
  82. .result = REJECT,
  83. },
  84. {
  85. "out of range jump",
  86. .insns = {
  87. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  88. BPF_EXIT_INSN(),
  89. },
  90. .errstr = "jump out of range",
  91. .result = REJECT,
  92. },
  93. {
  94. "out of range jump2",
  95. .insns = {
  96. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  97. BPF_EXIT_INSN(),
  98. },
  99. .errstr = "jump out of range",
  100. .result = REJECT,
  101. },
  102. {
  103. "test1 ld_imm64",
  104. .insns = {
  105. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  106. BPF_LD_IMM64(BPF_REG_0, 0),
  107. BPF_LD_IMM64(BPF_REG_0, 0),
  108. BPF_LD_IMM64(BPF_REG_0, 1),
  109. BPF_LD_IMM64(BPF_REG_0, 1),
  110. BPF_MOV64_IMM(BPF_REG_0, 2),
  111. BPF_EXIT_INSN(),
  112. },
  113. .errstr = "invalid BPF_LD_IMM insn",
  114. .errstr_unpriv = "R1 pointer comparison",
  115. .result = REJECT,
  116. },
  117. {
  118. "test2 ld_imm64",
  119. .insns = {
  120. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  121. BPF_LD_IMM64(BPF_REG_0, 0),
  122. BPF_LD_IMM64(BPF_REG_0, 0),
  123. BPF_LD_IMM64(BPF_REG_0, 1),
  124. BPF_LD_IMM64(BPF_REG_0, 1),
  125. BPF_EXIT_INSN(),
  126. },
  127. .errstr = "invalid BPF_LD_IMM insn",
  128. .errstr_unpriv = "R1 pointer comparison",
  129. .result = REJECT,
  130. },
  131. {
  132. "test3 ld_imm64",
  133. .insns = {
  134. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  135. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  136. BPF_LD_IMM64(BPF_REG_0, 0),
  137. BPF_LD_IMM64(BPF_REG_0, 0),
  138. BPF_LD_IMM64(BPF_REG_0, 1),
  139. BPF_LD_IMM64(BPF_REG_0, 1),
  140. BPF_EXIT_INSN(),
  141. },
  142. .errstr = "invalid bpf_ld_imm64 insn",
  143. .result = REJECT,
  144. },
  145. {
  146. "test4 ld_imm64",
  147. .insns = {
  148. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  149. BPF_EXIT_INSN(),
  150. },
  151. .errstr = "invalid bpf_ld_imm64 insn",
  152. .result = REJECT,
  153. },
  154. {
  155. "test5 ld_imm64",
  156. .insns = {
  157. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  158. },
  159. .errstr = "invalid bpf_ld_imm64 insn",
  160. .result = REJECT,
  161. },
  162. {
  163. "no bpf_exit",
  164. .insns = {
  165. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  166. },
  167. .errstr = "jump out of range",
  168. .result = REJECT,
  169. },
  170. {
  171. "loop (back-edge)",
  172. .insns = {
  173. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  174. BPF_EXIT_INSN(),
  175. },
  176. .errstr = "back-edge",
  177. .result = REJECT,
  178. },
  179. {
  180. "loop2 (back-edge)",
  181. .insns = {
  182. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  183. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  184. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  185. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  186. BPF_EXIT_INSN(),
  187. },
  188. .errstr = "back-edge",
  189. .result = REJECT,
  190. },
  191. {
  192. "conditional loop",
  193. .insns = {
  194. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  195. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  196. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  197. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  198. BPF_EXIT_INSN(),
  199. },
  200. .errstr = "back-edge",
  201. .result = REJECT,
  202. },
  203. {
  204. "read uninitialized register",
  205. .insns = {
  206. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  207. BPF_EXIT_INSN(),
  208. },
  209. .errstr = "R2 !read_ok",
  210. .result = REJECT,
  211. },
  212. {
  213. "read invalid register",
  214. .insns = {
  215. BPF_MOV64_REG(BPF_REG_0, -1),
  216. BPF_EXIT_INSN(),
  217. },
  218. .errstr = "R15 is invalid",
  219. .result = REJECT,
  220. },
  221. {
  222. "program doesn't init R0 before exit",
  223. .insns = {
  224. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  225. BPF_EXIT_INSN(),
  226. },
  227. .errstr = "R0 !read_ok",
  228. .result = REJECT,
  229. },
  230. {
  231. "program doesn't init R0 before exit in all branches",
  232. .insns = {
  233. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  234. BPF_MOV64_IMM(BPF_REG_0, 1),
  235. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  236. BPF_EXIT_INSN(),
  237. },
  238. .errstr = "R0 !read_ok",
  239. .errstr_unpriv = "R1 pointer comparison",
  240. .result = REJECT,
  241. },
  242. {
  243. "stack out of bounds",
  244. .insns = {
  245. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  246. BPF_EXIT_INSN(),
  247. },
  248. .errstr = "invalid stack",
  249. .result = REJECT,
  250. },
  251. {
  252. "invalid call insn1",
  253. .insns = {
  254. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  255. BPF_EXIT_INSN(),
  256. },
  257. .errstr = "BPF_CALL uses reserved",
  258. .result = REJECT,
  259. },
  260. {
  261. "invalid call insn2",
  262. .insns = {
  263. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  264. BPF_EXIT_INSN(),
  265. },
  266. .errstr = "BPF_CALL uses reserved",
  267. .result = REJECT,
  268. },
  269. {
  270. "invalid function call",
  271. .insns = {
  272. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  273. BPF_EXIT_INSN(),
  274. },
  275. .errstr = "invalid func 1234567",
  276. .result = REJECT,
  277. },
  278. {
  279. "uninitialized stack1",
  280. .insns = {
  281. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  282. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  283. BPF_LD_MAP_FD(BPF_REG_1, 0),
  284. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  285. BPF_EXIT_INSN(),
  286. },
  287. .fixup = {2},
  288. .errstr = "invalid indirect read from stack",
  289. .result = REJECT,
  290. },
  291. {
  292. "uninitialized stack2",
  293. .insns = {
  294. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  295. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  296. BPF_EXIT_INSN(),
  297. },
  298. .errstr = "invalid read from stack",
  299. .result = REJECT,
  300. },
  301. {
  302. "invalid argument register",
  303. .insns = {
  304. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
  305. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
  306. BPF_EXIT_INSN(),
  307. },
  308. .errstr = "R1 !read_ok",
  309. .result = REJECT,
  310. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  311. },
  312. {
  313. "non-invalid argument register",
  314. .insns = {
  315. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  316. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
  317. BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
  318. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
  319. BPF_EXIT_INSN(),
  320. },
  321. .result = ACCEPT,
  322. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  323. },
  324. {
  325. "check valid spill/fill",
  326. .insns = {
  327. /* spill R1(ctx) into stack */
  328. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  329. /* fill it back into R2 */
  330. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  331. /* should be able to access R0 = *(R2 + 8) */
  332. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  333. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  334. BPF_EXIT_INSN(),
  335. },
  336. .errstr_unpriv = "R0 leaks addr",
  337. .result = ACCEPT,
  338. .result_unpriv = REJECT,
  339. },
  340. {
  341. "check valid spill/fill, skb mark",
  342. .insns = {
  343. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  344. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
  345. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  346. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  347. offsetof(struct __sk_buff, mark)),
  348. BPF_EXIT_INSN(),
  349. },
  350. .result = ACCEPT,
  351. .result_unpriv = ACCEPT,
  352. },
  353. {
  354. "check corrupted spill/fill",
  355. .insns = {
  356. /* spill R1(ctx) into stack */
  357. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  358. /* mess up with R1 pointer on stack */
  359. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  360. /* fill back into R0 should fail */
  361. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  362. BPF_EXIT_INSN(),
  363. },
  364. .errstr_unpriv = "attempt to corrupt spilled",
  365. .errstr = "corrupted spill",
  366. .result = REJECT,
  367. },
  368. {
  369. "invalid src register in STX",
  370. .insns = {
  371. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  372. BPF_EXIT_INSN(),
  373. },
  374. .errstr = "R15 is invalid",
  375. .result = REJECT,
  376. },
  377. {
  378. "invalid dst register in STX",
  379. .insns = {
  380. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  381. BPF_EXIT_INSN(),
  382. },
  383. .errstr = "R14 is invalid",
  384. .result = REJECT,
  385. },
  386. {
  387. "invalid dst register in ST",
  388. .insns = {
  389. BPF_ST_MEM(BPF_B, 14, -1, -1),
  390. BPF_EXIT_INSN(),
  391. },
  392. .errstr = "R14 is invalid",
  393. .result = REJECT,
  394. },
  395. {
  396. "invalid src register in LDX",
  397. .insns = {
  398. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  399. BPF_EXIT_INSN(),
  400. },
  401. .errstr = "R12 is invalid",
  402. .result = REJECT,
  403. },
  404. {
  405. "invalid dst register in LDX",
  406. .insns = {
  407. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  408. BPF_EXIT_INSN(),
  409. },
  410. .errstr = "R11 is invalid",
  411. .result = REJECT,
  412. },
  413. {
  414. "junk insn",
  415. .insns = {
  416. BPF_RAW_INSN(0, 0, 0, 0, 0),
  417. BPF_EXIT_INSN(),
  418. },
  419. .errstr = "invalid BPF_LD_IMM",
  420. .result = REJECT,
  421. },
  422. {
  423. "junk insn2",
  424. .insns = {
  425. BPF_RAW_INSN(1, 0, 0, 0, 0),
  426. BPF_EXIT_INSN(),
  427. },
  428. .errstr = "BPF_LDX uses reserved fields",
  429. .result = REJECT,
  430. },
  431. {
  432. "junk insn3",
  433. .insns = {
  434. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  435. BPF_EXIT_INSN(),
  436. },
  437. .errstr = "invalid BPF_ALU opcode f0",
  438. .result = REJECT,
  439. },
  440. {
  441. "junk insn4",
  442. .insns = {
  443. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  444. BPF_EXIT_INSN(),
  445. },
  446. .errstr = "invalid BPF_ALU opcode f0",
  447. .result = REJECT,
  448. },
  449. {
  450. "junk insn5",
  451. .insns = {
  452. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  453. BPF_EXIT_INSN(),
  454. },
  455. .errstr = "BPF_ALU uses reserved fields",
  456. .result = REJECT,
  457. },
  458. {
  459. "misaligned read from stack",
  460. .insns = {
  461. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  462. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
  463. BPF_EXIT_INSN(),
  464. },
  465. .errstr = "misaligned access",
  466. .result = REJECT,
  467. },
  468. {
  469. "invalid map_fd for function call",
  470. .insns = {
  471. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  472. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
  473. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  474. BPF_LD_MAP_FD(BPF_REG_1, 0),
  475. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  476. BPF_EXIT_INSN(),
  477. },
  478. .errstr = "fd 0 is not pointing to valid bpf_map",
  479. .result = REJECT,
  480. },
  481. {
  482. "don't check return value before access",
  483. .insns = {
  484. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  485. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  486. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  487. BPF_LD_MAP_FD(BPF_REG_1, 0),
  488. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  489. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  490. BPF_EXIT_INSN(),
  491. },
  492. .fixup = {3},
  493. .errstr = "R0 invalid mem access 'map_value_or_null'",
  494. .result = REJECT,
  495. },
  496. {
  497. "access memory with incorrect alignment",
  498. .insns = {
  499. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  500. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  501. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  502. BPF_LD_MAP_FD(BPF_REG_1, 0),
  503. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  504. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  505. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  506. BPF_EXIT_INSN(),
  507. },
  508. .fixup = {3},
  509. .errstr = "misaligned access",
  510. .result = REJECT,
  511. },
  512. {
  513. "sometimes access memory with incorrect alignment",
  514. .insns = {
  515. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  516. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  517. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  518. BPF_LD_MAP_FD(BPF_REG_1, 0),
  519. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  520. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  521. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  522. BPF_EXIT_INSN(),
  523. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  524. BPF_EXIT_INSN(),
  525. },
  526. .fixup = {3},
  527. .errstr = "R0 invalid mem access",
  528. .errstr_unpriv = "R0 leaks addr",
  529. .result = REJECT,
  530. },
  531. {
  532. "jump test 1",
  533. .insns = {
  534. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  535. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  536. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  537. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  538. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  539. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  540. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  541. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  542. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  543. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  544. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  545. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  546. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  547. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  548. BPF_MOV64_IMM(BPF_REG_0, 0),
  549. BPF_EXIT_INSN(),
  550. },
  551. .errstr_unpriv = "R1 pointer comparison",
  552. .result_unpriv = REJECT,
  553. .result = ACCEPT,
  554. },
  555. {
  556. "jump test 2",
  557. .insns = {
  558. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  559. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  560. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  561. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  562. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  563. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  564. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  565. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  566. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  567. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  568. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  569. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  570. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  571. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  572. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  573. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  574. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  575. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  576. BPF_MOV64_IMM(BPF_REG_0, 0),
  577. BPF_EXIT_INSN(),
  578. },
  579. .errstr_unpriv = "R1 pointer comparison",
  580. .result_unpriv = REJECT,
  581. .result = ACCEPT,
  582. },
  583. {
  584. "jump test 3",
  585. .insns = {
  586. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  587. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  588. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  589. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  590. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  591. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  592. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  593. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  594. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  595. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  596. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  597. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  598. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  599. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  600. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  601. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  602. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  603. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  604. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  605. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  606. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  607. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  608. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  609. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  610. BPF_LD_MAP_FD(BPF_REG_1, 0),
  611. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  612. BPF_EXIT_INSN(),
  613. },
  614. .fixup = {24},
  615. .errstr_unpriv = "R1 pointer comparison",
  616. .result_unpriv = REJECT,
  617. .result = ACCEPT,
  618. },
  619. {
  620. "jump test 4",
  621. .insns = {
  622. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  623. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  624. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  625. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  626. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  627. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  628. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  629. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  630. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  631. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  632. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  633. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  634. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  635. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  636. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  637. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  638. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  639. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  640. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  641. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  642. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  643. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  644. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  645. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  646. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  647. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  648. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  649. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  650. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  651. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  652. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  653. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  654. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  655. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  656. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  657. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  658. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  659. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  660. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  661. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  662. BPF_MOV64_IMM(BPF_REG_0, 0),
  663. BPF_EXIT_INSN(),
  664. },
  665. .errstr_unpriv = "R1 pointer comparison",
  666. .result_unpriv = REJECT,
  667. .result = ACCEPT,
  668. },
  669. {
  670. "jump test 5",
  671. .insns = {
  672. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  673. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  674. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  675. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  676. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  677. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  678. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  679. BPF_MOV64_IMM(BPF_REG_0, 0),
  680. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  681. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  682. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  683. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  684. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  685. BPF_MOV64_IMM(BPF_REG_0, 0),
  686. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  687. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  688. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  689. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  690. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  691. BPF_MOV64_IMM(BPF_REG_0, 0),
  692. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  693. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  694. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  695. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  696. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  697. BPF_MOV64_IMM(BPF_REG_0, 0),
  698. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  699. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  700. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  701. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  702. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  703. BPF_MOV64_IMM(BPF_REG_0, 0),
  704. BPF_EXIT_INSN(),
  705. },
  706. .errstr_unpriv = "R1 pointer comparison",
  707. .result_unpriv = REJECT,
  708. .result = ACCEPT,
  709. },
	{
		/* Read every __sk_buff field a socket filter may read (len,
		 * mark, pkt_type, queue_mapping, protocol, vlan_present,
		 * vlan_tci).  Each load is separated by a conditional jump so
		 * all loads stay reachable.  Verifier must ACCEPT.
		 */
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		/* Load from a negative offset into the context pointer (R1):
		 * must be rejected as an invalid bpf_context access.
		 */
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		/* R1 is either the original ctx pointer (branch taken) or a
		 * map value pointer (fall-through path after the lookup),
		 * then used for a ctx-style load of pkt_type.  Verifier must
		 * REJECT the mixed pointer types ("different pointers").
		 */
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup = {4},	/* map fd patched into the BPF_LD_MAP_FD insn */
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		/* Same ctx-vs-map-value ambiguity as bad2, but constructed
		 * with a backward jump (-12) into the ctx load after R1 has
		 * been replaced by a map value pointer.  Must be REJECTed
		 * with "different pointers".
		 */
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),	/* jump back to the ctx load */
		},
		.fixup = {6},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		/* Variant of bad3: the backward jump (-13) lands on a ctx
		 * load of skb->len while R1 holds a map value pointer.
		 * Must be REJECTed with "different pointers".
		 */
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),	/* jump back to the ctx load */
		},
		.fixup = {7},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		/* Socket filters may not write skb->mark: REJECT. */
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		/* Socket filters may not write skb->tc_index: REJECT. */
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		/* cb[] must be accessed with 32-bit ops; a half-word (BPF_H)
		 * store is rejected.
		 */
		"check non-u32 access to cb",
		.insns = {
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		/* Reading 256 bytes past cb[0] is outside struct __sk_buff:
		 * REJECT even for BPF_PROG_TYPE_SCHED_ACT.
		 */
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		/* Socket prog: reading cb[4]/mark/tc_index and writing cb[0]
		 * and cb[2] is ACCEPTed; unprivileged loading is rejected
		 * because the stored R1 context pointer would leak.
		 */
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	{
		/* SCHED_CLS prog: writing mark, tc_index and cb[3] is
		 * ACCEPTed for privileged users; unprivileged users cannot
		 * load this prog type at all.
		 */
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* fp-10 plus access offset 2 gives an 8-byte-aligned slot at
		 * fp-8: ACCEPT.
		 */
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		/* fp-8 plus offset 2 lands at fp-6: misaligned 8-byte
		 * access, REJECT.
		 */
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		/* fp-10 plus offset 8 lands at fp-2: misaligned because the
		 * register itself is unaligned, REJECT.
		 */
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		/* fp-80000 + 8 is far below the valid stack range, REJECT. */
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		/* fp-8 + 8 is at the frame pointer itself (off=0): above the
		 * valid stack range, REJECT.
		 */
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
	{
		/* Returning the frame pointer in R0 is allowed for root but
		 * leaks a kernel address to unprivileged users: REJECT
		 * unpriv only.
		 */
		"unpriv: return pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R0 leaks addr",
	},
	{
		/* Pointer arithmetic on the ctx pointer: unpriv REJECT. */
		"unpriv: add const to pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		/* Adding two pointers: unpriv REJECT. */
		"unpriv: add pointer to pointer",
		.insns = {
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		/* Negating a pointer: unpriv REJECT. */
		"unpriv: neg pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		/* Comparing a pointer against an immediate: unpriv REJECT. */
		"unpriv: cmp pointer with const",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer comparison",
	},
	{
		/* Comparing two pointers: unpriv REJECT. */
		"unpriv: cmp pointer with pointer",
		.insns = {
			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R10 pointer comparison",
	},
	{
		/* bpf_trace_printk (helper id 6) is unavailable to
		 * unprivileged socket filters: unpriv REJECT.
		 */
		"unpriv: check that printk is disallowed",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "unknown func 6",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Passing a stack pointer as a map value (R4) to
		 * bpf_map_update_elem would let unprivileged users store a
		 * kernel address into a map: unpriv REJECT.
		 */
		"unpriv: pass pointer to helper function",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr_unpriv = "R4 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* A spilled pointer at fp-8 is used as a map key: the helper
		 * would read the pointer value, so this is an invalid
		 * indirect stack read even for root: REJECT.
		 */
		"unpriv: indirectly pass pointer on stack to helper function",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "invalid indirect read from stack off -8+0 size 8",
		.result = REJECT,
	},
	{
		/* Overwriting part of a spilled pointer with a 4-byte scalar
		 * store: unpriv REJECT ("attempt to corrupt spilled").
		 */
		"unpriv: mangle pointer on stack 1",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Same, with a single-byte store into the spilled slot. */
		"unpriv: mangle pointer on stack 2",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Reading a spilled 8-byte pointer with a 4-byte load is
		 * rejected for everyone ("invalid size").
		 */
		"unpriv: read pointer from stack in small chunks",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid size",
		.result = REJECT,
	},
	{
		/* Storing a pointer into the context is rejected for root
		 * (ctx is not writable here) and flagged as a leak for
		 * unprivileged users.
		 */
		"unpriv: write pointer into ctx",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		/* Storing a map-value pointer into its own map element is
		 * fine for root but leaks an address for unpriv: REJECT
		 * unpriv only.
		 */
		"unpriv: write pointer into map elem value",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* A 32-bit move truncates the frame pointer: unpriv REJECT
		 * ("partial copy").
		 */
		"unpriv: partial copy of pointer",
		.insns = {
			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 partial copy",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* bpf_tail_call with the ctx pointer in R3 (the index arg):
		 * unpriv REJECT because the pointer would leak into the
		 * helper.
		 */
		"unpriv: pass pointer to tail_call",
		.insns = {
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_array_fixup = {1},	/* prog-array fd patched in */
		.errstr_unpriv = "R3 leaks addr into helper",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* R1 is overwritten with a map pointer, then compared with
		 * zero: unpriv REJECT (pointer comparison).
		 */
		"unpriv: cmp map pointer with zero",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {1},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* R10 is read-only for everyone: REJECT. */
		"unpriv: write into frame pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "frame pointer is read only",
		.result = REJECT,
	},
	{
		/* Comparing R10 with an immediate: unpriv REJECT. */
		"unpriv: cmp of frame pointer",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Comparing a derived stack pointer with an immediate:
		 * unpriv REJECT.
		 */
		"unpriv: cmp of stack pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R2 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Build a stack pointer at fp-20 in two -10 steps, then
		 * store via both a register offset (R1=4) and an immediate
		 * offset (+8); all resulting accesses stay inside the valid
		 * stack range: ACCEPT.
		 */
		"stack pointer arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 4),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		/* Reading fp-8 without a preceding skb_load_bytes fill or
		 * other initialization: REJECT (uninitialized stack read).
		 */
		"raw_stack: no skb_load_bytes",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			/* Call to skb_load_bytes() omitted. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid read from stack off -8+0 size 8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Negative length argument (R4 = -8) to skb_load_bytes:
		 * REJECT.
		 */
		"raw_stack: skb_load_bytes, negative len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same with R4 = ~0 (all ones): REJECT. */
		"raw_stack: skb_load_bytes, negative len 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, ~0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero-length fill is also invalid: REJECT. */
		"raw_stack: skb_load_bytes, zero len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* skb_load_bytes fills fp-8..fp-1, so the stack need not be
		 * pre-initialized: ACCEPT.
		 */
		"raw_stack: skb_load_bytes, no init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Pre-initialized slot overwritten by skb_load_bytes:
		 * ACCEPT.
		 */
		"raw_stack: skb_load_bytes, init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Spilled ctx pointers sit just below and just above the
		 * 8-byte region the helper writes; they survive the call and
		 * can be filled back and dereferenced: ACCEPT.
		 */
		"raw_stack: skb_load_bytes, spilled regs around bounds",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* A spilled ctx pointer inside the region the helper writes
		 * is clobbered; dereferencing the refilled value must be
		 * REJECTed as an access through a scalar ('inv').
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R0 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Three spills: only the middle one (offset 0) overlaps the
		 * helper's write region, so only the R3 refill is invalid.
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill ctx into R3 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same layout, but the clobbered middle slot is used only as
		 * plain data (added as a scalar), never dereferenced:
		 * ACCEPT.
		 */
		"raw_stack: skb_load_bytes, spilled regs + data",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill data into R3 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Destination at fp-513 is below the 512-byte stack limit:
		 * REJECT.
		 */
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* fp-1 + 8 bytes would write above the frame pointer:
		 * REJECT.
		 */
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 0xffffffff offset and length both become -1 after
		 * truncation: REJECT.
		 */
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* INT_MAX length from fp-1: REJECT. */
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* INT_MAX length even from a valid fp-512 base: REJECT. */
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero length from fp-512: REJECT. */
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Maximum legal fill: 512 bytes into fp-512: ACCEPT. */
		"raw_stack: skb_load_bytes, large access",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 512),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Canonical direct packet access pattern: bounds-check
		 * data + 8 against data_end before reading a byte: ACCEPT.
		 */
		"direct packet access: test1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Packet pointer plus variable offsets (a byte read from the
		 * packet, masked to 16 bits via LSH/RSH) with a second
		 * bounds check before the final read: ACCEPT.
		 */
		"direct packet access: test2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Socket filters may not read skb->data directly from the
		 * context: REJECT (offset 76 is the data field).
		 */
		"direct packet access: test3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access off=76",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	},
	{
		/* Bounds-checked one-byte write into the packet from
		 * SCHED_CLS: ACCEPT.
		 */
		"direct packet access: test4 (write)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Inverted check form "pkt_end >= reg": the access sits on
		 * the branch where the check succeeded: ACCEPT.
		 */
		"direct packet access: test5 (pkt_end >= reg, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The packet read is on the fall-through path where the
		 * bounds check FAILED: REJECT.
		 */
		"direct packet access: test6 (pkt_end >= reg, bad access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Packet reads on BOTH branches: the one on the failed-check
		 * path makes the whole program invalid: REJECT.
		 */
		"direct packet access: test7 (pkt_end >= reg, both accesses)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
  1670. {
  1671. "direct packet access: test8 (double test, variant 1)",
  1672. .insns = {
  1673. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1674. offsetof(struct __sk_buff, data)),
  1675. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1676. offsetof(struct __sk_buff, data_end)),
  1677. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1678. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1679. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
  1680. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1681. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1682. BPF_MOV64_IMM(BPF_REG_0, 1),
  1683. BPF_EXIT_INSN(),
  1684. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1685. BPF_MOV64_IMM(BPF_REG_0, 0),
  1686. BPF_EXIT_INSN(),
  1687. },
  1688. .result = ACCEPT,
  1689. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1690. },
  1691. {
  1692. "direct packet access: test9 (double test, variant 2)",
  1693. .insns = {
  1694. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1695. offsetof(struct __sk_buff, data)),
  1696. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1697. offsetof(struct __sk_buff, data_end)),
  1698. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1699. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1700. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
  1701. BPF_MOV64_IMM(BPF_REG_0, 1),
  1702. BPF_EXIT_INSN(),
  1703. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1704. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1705. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1706. BPF_MOV64_IMM(BPF_REG_0, 0),
  1707. BPF_EXIT_INSN(),
  1708. },
  1709. .result = ACCEPT,
  1710. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1711. },
  1712. {
  1713. "direct packet access: test10 (write invalid)",
  1714. .insns = {
  1715. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1716. offsetof(struct __sk_buff, data)),
  1717. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1718. offsetof(struct __sk_buff, data_end)),
  1719. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1720. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1721. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
  1722. BPF_MOV64_IMM(BPF_REG_0, 0),
  1723. BPF_EXIT_INSN(),
  1724. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  1725. BPF_MOV64_IMM(BPF_REG_0, 0),
  1726. BPF_EXIT_INSN(),
  1727. },
  1728. .errstr = "invalid access to packet",
  1729. .result = REJECT,
  1730. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1731. },
  1732. {
  1733. "helper access to packet: test1, valid packet_ptr range",
  1734. .insns = {
  1735. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1736. offsetof(struct xdp_md, data)),
  1737. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1738. offsetof(struct xdp_md, data_end)),
  1739. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  1740. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  1741. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
  1742. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1743. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1744. BPF_MOV64_IMM(BPF_REG_4, 0),
  1745. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
  1746. BPF_MOV64_IMM(BPF_REG_0, 0),
  1747. BPF_EXIT_INSN(),
  1748. },
  1749. .fixup = {5},
  1750. .result_unpriv = ACCEPT,
  1751. .result = ACCEPT,
  1752. .prog_type = BPF_PROG_TYPE_XDP,
  1753. },
  1754. {
  1755. "helper access to packet: test2, unchecked packet_ptr",
  1756. .insns = {
  1757. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1758. offsetof(struct xdp_md, data)),
  1759. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1760. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1761. BPF_MOV64_IMM(BPF_REG_0, 0),
  1762. BPF_EXIT_INSN(),
  1763. },
  1764. .fixup = {1},
  1765. .result = REJECT,
  1766. .errstr = "invalid access to packet",
  1767. .prog_type = BPF_PROG_TYPE_XDP,
  1768. },
  1769. {
  1770. "helper access to packet: test3, variable add",
  1771. .insns = {
  1772. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1773. offsetof(struct xdp_md, data)),
  1774. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1775. offsetof(struct xdp_md, data_end)),
  1776. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1777. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
  1778. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
  1779. BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
  1780. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1781. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
  1782. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  1783. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
  1784. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
  1785. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1786. BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
  1787. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1788. BPF_MOV64_IMM(BPF_REG_0, 0),
  1789. BPF_EXIT_INSN(),
  1790. },
  1791. .fixup = {11},
  1792. .result = ACCEPT,
  1793. .prog_type = BPF_PROG_TYPE_XDP,
  1794. },
  1795. {
  1796. "helper access to packet: test4, packet_ptr with bad range",
  1797. .insns = {
  1798. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1799. offsetof(struct xdp_md, data)),
  1800. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1801. offsetof(struct xdp_md, data_end)),
  1802. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1803. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  1804. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
  1805. BPF_MOV64_IMM(BPF_REG_0, 0),
  1806. BPF_EXIT_INSN(),
  1807. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1808. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1809. BPF_MOV64_IMM(BPF_REG_0, 0),
  1810. BPF_EXIT_INSN(),
  1811. },
  1812. .fixup = {7},
  1813. .result = REJECT,
  1814. .errstr = "invalid access to packet",
  1815. .prog_type = BPF_PROG_TYPE_XDP,
  1816. },
  1817. {
  1818. "helper access to packet: test5, packet_ptr with too short range",
  1819. .insns = {
  1820. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1821. offsetof(struct xdp_md, data)),
  1822. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1823. offsetof(struct xdp_md, data_end)),
  1824. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  1825. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1826. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
  1827. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
  1828. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1829. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1830. BPF_MOV64_IMM(BPF_REG_0, 0),
  1831. BPF_EXIT_INSN(),
  1832. },
  1833. .fixup = {6},
  1834. .result = REJECT,
  1835. .errstr = "invalid access to packet",
  1836. .prog_type = BPF_PROG_TYPE_XDP,
  1837. },
  1838. {
  1839. "helper access to packet: test6, cls valid packet_ptr range",
  1840. .insns = {
  1841. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1842. offsetof(struct __sk_buff, data)),
  1843. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1844. offsetof(struct __sk_buff, data_end)),
  1845. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  1846. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  1847. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
  1848. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1849. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1850. BPF_MOV64_IMM(BPF_REG_4, 0),
  1851. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
  1852. BPF_MOV64_IMM(BPF_REG_0, 0),
  1853. BPF_EXIT_INSN(),
  1854. },
  1855. .fixup = {5},
  1856. .result = ACCEPT,
  1857. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1858. },
  1859. {
  1860. "helper access to packet: test7, cls unchecked packet_ptr",
  1861. .insns = {
  1862. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1863. offsetof(struct __sk_buff, data)),
  1864. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1865. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1866. BPF_MOV64_IMM(BPF_REG_0, 0),
  1867. BPF_EXIT_INSN(),
  1868. },
  1869. .fixup = {1},
  1870. .result = REJECT,
  1871. .errstr = "invalid access to packet",
  1872. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1873. },
  1874. {
  1875. "helper access to packet: test8, cls variable add",
  1876. .insns = {
  1877. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1878. offsetof(struct __sk_buff, data)),
  1879. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1880. offsetof(struct __sk_buff, data_end)),
  1881. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1882. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
  1883. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
  1884. BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
  1885. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1886. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
  1887. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  1888. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
  1889. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
  1890. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1891. BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
  1892. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1893. BPF_MOV64_IMM(BPF_REG_0, 0),
  1894. BPF_EXIT_INSN(),
  1895. },
  1896. .fixup = {11},
  1897. .result = ACCEPT,
  1898. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1899. },
  1900. {
  1901. "helper access to packet: test9, cls packet_ptr with bad range",
  1902. .insns = {
  1903. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1904. offsetof(struct __sk_buff, data)),
  1905. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1906. offsetof(struct __sk_buff, data_end)),
  1907. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1908. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  1909. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
  1910. BPF_MOV64_IMM(BPF_REG_0, 0),
  1911. BPF_EXIT_INSN(),
  1912. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1913. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1914. BPF_MOV64_IMM(BPF_REG_0, 0),
  1915. BPF_EXIT_INSN(),
  1916. },
  1917. .fixup = {7},
  1918. .result = REJECT,
  1919. .errstr = "invalid access to packet",
  1920. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1921. },
  1922. {
  1923. "helper access to packet: test10, cls packet_ptr with too short range",
  1924. .insns = {
  1925. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1926. offsetof(struct __sk_buff, data)),
  1927. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1928. offsetof(struct __sk_buff, data_end)),
  1929. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  1930. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1931. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
  1932. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
  1933. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1934. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1935. BPF_MOV64_IMM(BPF_REG_0, 0),
  1936. BPF_EXIT_INSN(),
  1937. },
  1938. .fixup = {6},
  1939. .result = REJECT,
  1940. .errstr = "invalid access to packet",
  1941. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1942. },
  1943. {
  1944. "helper access to packet: test11, cls unsuitable helper 1",
  1945. .insns = {
  1946. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  1947. offsetof(struct __sk_buff, data)),
  1948. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  1949. offsetof(struct __sk_buff, data_end)),
  1950. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  1951. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1952. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
  1953. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
  1954. BPF_MOV64_IMM(BPF_REG_2, 0),
  1955. BPF_MOV64_IMM(BPF_REG_4, 42),
  1956. BPF_MOV64_IMM(BPF_REG_5, 0),
  1957. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
  1958. BPF_MOV64_IMM(BPF_REG_0, 0),
  1959. BPF_EXIT_INSN(),
  1960. },
  1961. .result = REJECT,
  1962. .errstr = "helper access to the packet",
  1963. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1964. },
  1965. {
  1966. "helper access to packet: test12, cls unsuitable helper 2",
  1967. .insns = {
  1968. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  1969. offsetof(struct __sk_buff, data)),
  1970. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  1971. offsetof(struct __sk_buff, data_end)),
  1972. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1973. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
  1974. BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
  1975. BPF_MOV64_IMM(BPF_REG_2, 0),
  1976. BPF_MOV64_IMM(BPF_REG_4, 4),
  1977. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
  1978. BPF_MOV64_IMM(BPF_REG_0, 0),
  1979. BPF_EXIT_INSN(),
  1980. },
  1981. .result = REJECT,
  1982. .errstr = "helper access to the packet",
  1983. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1984. },
  1985. {
  1986. "helper access to packet: test13, cls helper ok",
  1987. .insns = {
  1988. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  1989. offsetof(struct __sk_buff, data)),
  1990. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  1991. offsetof(struct __sk_buff, data_end)),
  1992. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  1993. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  1994. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  1995. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  1996. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  1997. BPF_MOV64_IMM(BPF_REG_2, 4),
  1998. BPF_MOV64_IMM(BPF_REG_3, 0),
  1999. BPF_MOV64_IMM(BPF_REG_4, 0),
  2000. BPF_MOV64_IMM(BPF_REG_5, 0),
  2001. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2002. BPF_MOV64_IMM(BPF_REG_0, 0),
  2003. BPF_EXIT_INSN(),
  2004. },
  2005. .result = ACCEPT,
  2006. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2007. },
  2008. {
  2009. "helper access to packet: test14, cls helper fail sub",
  2010. .insns = {
  2011. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2012. offsetof(struct __sk_buff, data)),
  2013. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2014. offsetof(struct __sk_buff, data_end)),
  2015. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2016. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2017. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2018. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2019. BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
  2020. BPF_MOV64_IMM(BPF_REG_2, 4),
  2021. BPF_MOV64_IMM(BPF_REG_3, 0),
  2022. BPF_MOV64_IMM(BPF_REG_4, 0),
  2023. BPF_MOV64_IMM(BPF_REG_5, 0),
  2024. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2025. BPF_MOV64_IMM(BPF_REG_0, 0),
  2026. BPF_EXIT_INSN(),
  2027. },
  2028. .result = REJECT,
  2029. .errstr = "type=inv expected=fp",
  2030. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2031. },
  2032. {
  2033. "helper access to packet: test15, cls helper fail range 1",
  2034. .insns = {
  2035. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2036. offsetof(struct __sk_buff, data)),
  2037. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2038. offsetof(struct __sk_buff, data_end)),
  2039. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2040. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2041. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2042. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2043. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2044. BPF_MOV64_IMM(BPF_REG_2, 8),
  2045. BPF_MOV64_IMM(BPF_REG_3, 0),
  2046. BPF_MOV64_IMM(BPF_REG_4, 0),
  2047. BPF_MOV64_IMM(BPF_REG_5, 0),
  2048. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2049. BPF_MOV64_IMM(BPF_REG_0, 0),
  2050. BPF_EXIT_INSN(),
  2051. },
  2052. .result = REJECT,
  2053. .errstr = "invalid access to packet",
  2054. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2055. },
  2056. {
  2057. "helper access to packet: test16, cls helper fail range 2",
  2058. .insns = {
  2059. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2060. offsetof(struct __sk_buff, data)),
  2061. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2062. offsetof(struct __sk_buff, data_end)),
  2063. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2064. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2065. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2066. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2067. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2068. BPF_MOV64_IMM(BPF_REG_2, -9),
  2069. BPF_MOV64_IMM(BPF_REG_3, 0),
  2070. BPF_MOV64_IMM(BPF_REG_4, 0),
  2071. BPF_MOV64_IMM(BPF_REG_5, 0),
  2072. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2073. BPF_MOV64_IMM(BPF_REG_0, 0),
  2074. BPF_EXIT_INSN(),
  2075. },
  2076. .result = REJECT,
  2077. .errstr = "invalid access to packet",
  2078. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2079. },
  2080. {
  2081. "helper access to packet: test17, cls helper fail range 3",
  2082. .insns = {
  2083. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2084. offsetof(struct __sk_buff, data)),
  2085. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2086. offsetof(struct __sk_buff, data_end)),
  2087. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2088. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2089. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2090. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2091. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2092. BPF_MOV64_IMM(BPF_REG_2, ~0),
  2093. BPF_MOV64_IMM(BPF_REG_3, 0),
  2094. BPF_MOV64_IMM(BPF_REG_4, 0),
  2095. BPF_MOV64_IMM(BPF_REG_5, 0),
  2096. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2097. BPF_MOV64_IMM(BPF_REG_0, 0),
  2098. BPF_EXIT_INSN(),
  2099. },
  2100. .result = REJECT,
  2101. .errstr = "invalid access to packet",
  2102. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2103. },
  2104. {
  2105. "helper access to packet: test18, cls helper fail range zero",
  2106. .insns = {
  2107. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2108. offsetof(struct __sk_buff, data)),
  2109. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2110. offsetof(struct __sk_buff, data_end)),
  2111. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2112. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2113. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2114. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2115. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2116. BPF_MOV64_IMM(BPF_REG_2, 0),
  2117. BPF_MOV64_IMM(BPF_REG_3, 0),
  2118. BPF_MOV64_IMM(BPF_REG_4, 0),
  2119. BPF_MOV64_IMM(BPF_REG_5, 0),
  2120. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2121. BPF_MOV64_IMM(BPF_REG_0, 0),
  2122. BPF_EXIT_INSN(),
  2123. },
  2124. .result = REJECT,
  2125. .errstr = "invalid access to packet",
  2126. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2127. },
  2128. {
  2129. "helper access to packet: test19, pkt end as input",
  2130. .insns = {
  2131. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2132. offsetof(struct __sk_buff, data)),
  2133. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2134. offsetof(struct __sk_buff, data_end)),
  2135. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2136. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2137. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2138. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2139. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  2140. BPF_MOV64_IMM(BPF_REG_2, 4),
  2141. BPF_MOV64_IMM(BPF_REG_3, 0),
  2142. BPF_MOV64_IMM(BPF_REG_4, 0),
  2143. BPF_MOV64_IMM(BPF_REG_5, 0),
  2144. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2145. BPF_MOV64_IMM(BPF_REG_0, 0),
  2146. BPF_EXIT_INSN(),
  2147. },
  2148. .result = REJECT,
  2149. .errstr = "R1 type=pkt_end expected=fp",
  2150. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2151. },
  2152. {
  2153. "helper access to packet: test20, wrong reg",
  2154. .insns = {
  2155. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  2156. offsetof(struct __sk_buff, data)),
  2157. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  2158. offsetof(struct __sk_buff, data_end)),
  2159. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  2160. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  2161. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  2162. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  2163. BPF_MOV64_IMM(BPF_REG_2, 4),
  2164. BPF_MOV64_IMM(BPF_REG_3, 0),
  2165. BPF_MOV64_IMM(BPF_REG_4, 0),
  2166. BPF_MOV64_IMM(BPF_REG_5, 0),
  2167. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
  2168. BPF_MOV64_IMM(BPF_REG_0, 0),
  2169. BPF_EXIT_INSN(),
  2170. },
  2171. .result = REJECT,
  2172. .errstr = "invalid access to packet",
  2173. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2174. },
  2175. {
  2176. "valid map access into an array with a constant",
  2177. .insns = {
  2178. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2179. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2180. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2181. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2182. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2183. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2184. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2185. BPF_EXIT_INSN(),
  2186. },
  2187. .test_val_map_fixup = {3},
  2188. .errstr_unpriv = "R0 leaks addr",
  2189. .result_unpriv = REJECT,
  2190. .result = ACCEPT,
  2191. },
  2192. {
  2193. "valid map access into an array with a register",
  2194. .insns = {
  2195. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2196. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2197. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2198. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2199. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2200. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  2201. BPF_MOV64_IMM(BPF_REG_1, 4),
  2202. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  2203. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2204. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2205. BPF_EXIT_INSN(),
  2206. },
  2207. .test_val_map_fixup = {3},
  2208. .errstr_unpriv = "R0 leaks addr",
  2209. .result_unpriv = REJECT,
  2210. .result = ACCEPT,
  2211. },
  2212. {
  2213. "valid map access into an array with a variable",
  2214. .insns = {
  2215. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2216. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2217. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2218. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2219. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2220. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  2221. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2222. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
  2223. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  2224. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2225. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2226. BPF_EXIT_INSN(),
  2227. },
  2228. .test_val_map_fixup = {3},
  2229. .errstr_unpriv = "R0 leaks addr",
  2230. .result_unpriv = REJECT,
  2231. .result = ACCEPT,
  2232. },
  2233. {
  2234. "valid map access into an array with a signed variable",
  2235. .insns = {
  2236. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2237. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2238. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2239. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2240. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2241. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
  2242. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2243. BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
  2244. BPF_MOV32_IMM(BPF_REG_1, 0),
  2245. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
  2246. BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
  2247. BPF_MOV32_IMM(BPF_REG_1, 0),
  2248. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  2249. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2250. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2251. BPF_EXIT_INSN(),
  2252. },
  2253. .test_val_map_fixup = {3},
  2254. .errstr_unpriv = "R0 leaks addr",
  2255. .result_unpriv = REJECT,
  2256. .result = ACCEPT,
  2257. },
  2258. {
  2259. "invalid map access into an array with a constant",
  2260. .insns = {
  2261. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2262. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2263. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2264. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2265. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2266. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2267. BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
  2268. offsetof(struct test_val, foo)),
  2269. BPF_EXIT_INSN(),
  2270. },
  2271. .test_val_map_fixup = {3},
  2272. .errstr = "invalid access to map value, value_size=48 off=48 size=8",
  2273. .result = REJECT,
  2274. },
  2275. {
  2276. "invalid map access into an array with a register",
  2277. .insns = {
  2278. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2279. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2280. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2281. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2282. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2283. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  2284. BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
  2285. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  2286. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2287. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2288. BPF_EXIT_INSN(),
  2289. },
  2290. .test_val_map_fixup = {3},
  2291. .errstr = "R0 min value is outside of the array range",
  2292. .result = REJECT,
  2293. },
  2294. {
  2295. "invalid map access into an array with a variable",
  2296. .insns = {
  2297. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2298. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2299. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2300. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2301. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2302. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  2303. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2304. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  2305. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2306. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2307. BPF_EXIT_INSN(),
  2308. },
  2309. .test_val_map_fixup = {3},
  2310. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  2311. .result = REJECT,
  2312. },
  2313. {
  2314. "invalid map access into an array with no floor check",
  2315. .insns = {
  2316. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2317. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2318. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2319. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2320. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2321. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  2322. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2323. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
  2324. BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
  2325. BPF_MOV32_IMM(BPF_REG_1, 0),
  2326. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  2327. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2328. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2329. BPF_EXIT_INSN(),
  2330. },
  2331. .test_val_map_fixup = {3},
  2332. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  2333. .result = REJECT,
  2334. },
  2335. {
  2336. "invalid map access into an array with a invalid max check",
  2337. .insns = {
  2338. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2339. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2340. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2341. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2342. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2343. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  2344. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2345. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
  2346. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  2347. BPF_MOV32_IMM(BPF_REG_1, 0),
  2348. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  2349. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2350. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2351. BPF_EXIT_INSN(),
  2352. },
  2353. .test_val_map_fixup = {3},
  2354. .errstr = "invalid access to map value, value_size=48 off=44 size=8",
  2355. .result = REJECT,
  2356. },
  2357. {
  2358. "invalid map access into an array with a invalid max check",
  2359. .insns = {
  2360. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2361. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2362. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2363. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2364. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2365. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  2366. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  2367. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2368. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2369. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2370. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2371. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2372. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  2373. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
  2374. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct test_val, foo)),
  2375. BPF_EXIT_INSN(),
  2376. },
  2377. .test_val_map_fixup = {3, 11},
  2378. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  2379. .result = REJECT,
  2380. },
  2381. };
  2382. static int probe_filter_length(struct bpf_insn *fp)
  2383. {
  2384. int len = 0;
  2385. for (len = MAX_INSNS - 1; len > 0; --len)
  2386. if (fp[len].code != 0 || fp[len].imm != 0)
  2387. break;
  2388. return len + 1;
  2389. }
  2390. static int create_map(size_t val_size, int num)
  2391. {
  2392. int map_fd;
  2393. map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
  2394. sizeof(long long), val_size, num, 0);
  2395. if (map_fd < 0)
  2396. printf("failed to create map '%s'\n", strerror(errno));
  2397. return map_fd;
  2398. }
  2399. static int create_prog_array(void)
  2400. {
  2401. int map_fd;
  2402. map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
  2403. sizeof(int), sizeof(int), 4, 0);
  2404. if (map_fd < 0)
  2405. printf("failed to create prog_array '%s'\n", strerror(errno));
  2406. return map_fd;
  2407. }
  2408. static int test(void)
  2409. {
  2410. int prog_fd, i, pass_cnt = 0, err_cnt = 0;
  2411. bool unpriv = geteuid() != 0;
  2412. for (i = 0; i < ARRAY_SIZE(tests); i++) {
  2413. struct bpf_insn *prog = tests[i].insns;
  2414. int prog_type = tests[i].prog_type;
  2415. int prog_len = probe_filter_length(prog);
  2416. int *fixup = tests[i].fixup;
  2417. int *prog_array_fixup = tests[i].prog_array_fixup;
  2418. int *test_val_map_fixup = tests[i].test_val_map_fixup;
  2419. int expected_result;
  2420. const char *expected_errstr;
  2421. int map_fd = -1, prog_array_fd = -1, test_val_map_fd = -1;
  2422. if (*fixup) {
  2423. map_fd = create_map(sizeof(long long), 1024);
  2424. do {
  2425. prog[*fixup].imm = map_fd;
  2426. fixup++;
  2427. } while (*fixup);
  2428. }
  2429. if (*prog_array_fixup) {
  2430. prog_array_fd = create_prog_array();
  2431. do {
  2432. prog[*prog_array_fixup].imm = prog_array_fd;
  2433. prog_array_fixup++;
  2434. } while (*prog_array_fixup);
  2435. }
  2436. if (*test_val_map_fixup) {
  2437. /* Unprivileged can't create a hash map.*/
  2438. if (unpriv)
  2439. continue;
  2440. test_val_map_fd = create_map(sizeof(struct test_val),
  2441. 256);
  2442. do {
  2443. prog[*test_val_map_fixup].imm = test_val_map_fd;
  2444. test_val_map_fixup++;
  2445. } while (*test_val_map_fixup);
  2446. }
  2447. printf("#%d %s ", i, tests[i].descr);
  2448. prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
  2449. prog, prog_len * sizeof(struct bpf_insn),
  2450. "GPL", 0);
  2451. if (unpriv && tests[i].result_unpriv != UNDEF)
  2452. expected_result = tests[i].result_unpriv;
  2453. else
  2454. expected_result = tests[i].result;
  2455. if (unpriv && tests[i].errstr_unpriv)
  2456. expected_errstr = tests[i].errstr_unpriv;
  2457. else
  2458. expected_errstr = tests[i].errstr;
  2459. if (expected_result == ACCEPT) {
  2460. if (prog_fd < 0) {
  2461. printf("FAIL\nfailed to load prog '%s'\n",
  2462. strerror(errno));
  2463. printf("%s", bpf_log_buf);
  2464. err_cnt++;
  2465. goto fail;
  2466. }
  2467. } else {
  2468. if (prog_fd >= 0) {
  2469. printf("FAIL\nunexpected success to load\n");
  2470. printf("%s", bpf_log_buf);
  2471. err_cnt++;
  2472. goto fail;
  2473. }
  2474. if (strstr(bpf_log_buf, expected_errstr) == 0) {
  2475. printf("FAIL\nunexpected error message: %s",
  2476. bpf_log_buf);
  2477. err_cnt++;
  2478. goto fail;
  2479. }
  2480. }
  2481. pass_cnt++;
  2482. printf("OK\n");
  2483. fail:
  2484. if (map_fd >= 0)
  2485. close(map_fd);
  2486. if (prog_array_fd >= 0)
  2487. close(prog_array_fd);
  2488. if (test_val_map_fd >= 0)
  2489. close(test_val_map_fd);
  2490. close(prog_fd);
  2491. }
  2492. printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
  2493. return 0;
  2494. }
  2495. int main(void)
  2496. {
  2497. struct rlimit r = {1 << 20, 1 << 20};
  2498. setrlimit(RLIMIT_MEMLOCK, &r);
  2499. return test();
  2500. }