bpf_jit_comp.c 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804
  1. /*
  2. * BPF Jit compiler for s390.
  3. *
  4. * Copyright IBM Corp. 2012
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/moduleloader.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/filter.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/processor.h>
  13. #include <asm/facility.h>
  14. /*
  15. * Conventions:
  16. * %r2 = skb pointer
  17. * %r3 = offset parameter
  18. * %r4 = scratch register / length parameter
  19. * %r5 = BPF A accumulator
  20. * %r8 = return address
  21. * %r9 = save register for skb pointer
  22. * %r10 = skb->data
  23. * %r11 = skb->len - skb->data_len (headlen)
  24. * %r12 = BPF X accumulator
  25. * %r13 = literal pool pointer
  26. * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
  27. */
  28. int bpf_jit_enable __read_mostly;
  29. /*
  30. * assembly code in arch/x86/net/bpf_jit.S
  31. */
  32. extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
  33. extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
  34. struct bpf_jit {
  35. unsigned int seen;
  36. u8 *start;
  37. u8 *prg;
  38. u8 *mid;
  39. u8 *lit;
  40. u8 *end;
  41. u8 *base_ip;
  42. u8 *ret0_ip;
  43. u8 *exit_ip;
  44. unsigned int off_load_word;
  45. unsigned int off_load_half;
  46. unsigned int off_load_byte;
  47. unsigned int off_load_bmsh;
  48. unsigned int off_load_iword;
  49. unsigned int off_load_ihalf;
  50. unsigned int off_load_ibyte;
  51. };
  52. #define BPF_SIZE_MAX 4096 /* Max size for program */
  53. #define SEEN_DATAREF 1 /* might call external helpers */
  54. #define SEEN_XREG 2 /* ebx is used */
  55. #define SEEN_MEM 4 /* use mem[] for temporary storage */
  56. #define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
  57. #define SEEN_LITERAL 16 /* code uses literals */
  58. #define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
  59. #define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
  60. #define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
  61. #define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
  62. #define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
  63. #define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
  64. #define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
  65. #define EMIT2(op) \
  66. ({ \
  67. if (jit->prg + 2 <= jit->mid) \
  68. *(u16 *) jit->prg = op; \
  69. jit->prg += 2; \
  70. })
  71. #define EMIT4(op) \
  72. ({ \
  73. if (jit->prg + 4 <= jit->mid) \
  74. *(u32 *) jit->prg = op; \
  75. jit->prg += 4; \
  76. })
  77. #define EMIT4_DISP(op, disp) \
  78. ({ \
  79. unsigned int __disp = (disp) & 0xfff; \
  80. EMIT4(op | __disp); \
  81. })
  82. #define EMIT4_IMM(op, imm) \
  83. ({ \
  84. unsigned int __imm = (imm) & 0xffff; \
  85. EMIT4(op | __imm); \
  86. })
  87. #define EMIT4_PCREL(op, pcrel) \
  88. ({ \
  89. long __pcrel = ((pcrel) >> 1) & 0xffff; \
  90. EMIT4(op | __pcrel); \
  91. })
  92. #define EMIT6(op1, op2) \
  93. ({ \
  94. if (jit->prg + 6 <= jit->mid) { \
  95. *(u32 *) jit->prg = op1; \
  96. *(u16 *) (jit->prg + 4) = op2; \
  97. } \
  98. jit->prg += 6; \
  99. })
  100. #define EMIT6_DISP(op1, op2, disp) \
  101. ({ \
  102. unsigned int __disp = (disp) & 0xfff; \
  103. EMIT6(op1 | __disp, op2); \
  104. })
  105. #define EMIT6_IMM(op, imm) \
  106. ({ \
  107. unsigned int __imm = (imm); \
  108. EMIT6(op | (__imm >> 16), __imm & 0xffff); \
  109. })
  110. #define EMIT_CONST(val) \
  111. ({ \
  112. unsigned int ret; \
  113. ret = (unsigned int) (jit->lit - jit->base_ip); \
  114. jit->seen |= SEEN_LITERAL; \
  115. if (jit->lit + 4 <= jit->end) \
  116. *(u32 *) jit->lit = val; \
  117. jit->lit += 4; \
  118. ret; \
  119. })
  120. #define EMIT_FN_CONST(bit, fn) \
  121. ({ \
  122. unsigned int ret; \
  123. ret = (unsigned int) (jit->lit - jit->base_ip); \
  124. if (jit->seen & bit) { \
  125. jit->seen |= SEEN_LITERAL; \
  126. if (jit->lit + 8 <= jit->end) \
  127. *(void **) jit->lit = fn; \
  128. jit->lit += 8; \
  129. } \
  130. ret; \
  131. })
  132. static void bpf_jit_prologue(struct bpf_jit *jit)
  133. {
  134. /* Save registers and create stack frame if necessary */
  135. if (jit->seen & SEEN_DATAREF) {
  136. /* stmg %r8,%r15,88(%r15) */
  137. EMIT6(0xeb8ff058, 0x0024);
  138. /* lgr %r14,%r15 */
  139. EMIT4(0xb90400ef);
  140. /* ahi %r15,<offset> */
  141. EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
  142. /* stg %r14,152(%r15) */
  143. EMIT6(0xe3e0f098, 0x0024);
  144. } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  145. /* stmg %r12,%r13,120(%r15) */
  146. EMIT6(0xebcdf078, 0x0024);
  147. else if (jit->seen & SEEN_XREG)
  148. /* stg %r12,120(%r15) */
  149. EMIT6(0xe3c0f078, 0x0024);
  150. else if (jit->seen & SEEN_LITERAL)
  151. /* stg %r13,128(%r15) */
  152. EMIT6(0xe3d0f080, 0x0024);
  153. /* Setup literal pool */
  154. if (jit->seen & SEEN_LITERAL) {
  155. /* basr %r13,0 */
  156. EMIT2(0x0dd0);
  157. jit->base_ip = jit->prg;
  158. }
  159. jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
  160. jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
  161. jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
  162. jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
  163. jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
  164. jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
  165. jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
  166. /* Filter needs to access skb data */
  167. if (jit->seen & SEEN_DATAREF) {
  168. /* l %r11,<len>(%r2) */
  169. EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
  170. /* s %r11,<data_len>(%r2) */
  171. EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
  172. /* lg %r10,<data>(%r2) */
  173. EMIT6_DISP(0xe3a02000, 0x0004,
  174. offsetof(struct sk_buff, data));
  175. }
  176. }
  177. static void bpf_jit_epilogue(struct bpf_jit *jit)
  178. {
  179. /* Return 0 */
  180. if (jit->seen & SEEN_RET0) {
  181. jit->ret0_ip = jit->prg;
  182. /* lghi %r2,0 */
  183. EMIT4(0xa7290000);
  184. }
  185. jit->exit_ip = jit->prg;
  186. /* Restore registers */
  187. if (jit->seen & SEEN_DATAREF)
  188. /* lmg %r8,%r15,<offset>(%r15) */
  189. EMIT6_DISP(0xeb8ff000, 0x0004,
  190. (jit->seen & SEEN_MEM) ? 200 : 168);
  191. else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  192. /* lmg %r12,%r13,120(%r15) */
  193. EMIT6(0xebcdf078, 0x0004);
  194. else if (jit->seen & SEEN_XREG)
  195. /* lg %r12,120(%r15) */
  196. EMIT6(0xe3c0f078, 0x0004);
  197. else if (jit->seen & SEEN_LITERAL)
  198. /* lg %r13,128(%r15) */
  199. EMIT6(0xe3d0f080, 0x0004);
  200. /* br %r14 */
  201. EMIT2(0x07fe);
  202. }
  203. /*
  204. * make sure we dont leak kernel information to user
  205. */
  206. static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
  207. {
  208. /* Clear temporary memory if (seen & SEEN_MEM) */
  209. if (jit->seen & SEEN_MEM)
  210. /* xc 0(64,%r15),0(%r15) */
  211. EMIT6(0xd73ff000, 0xf000);
  212. /* Clear X if (seen & SEEN_XREG) */
  213. if (jit->seen & SEEN_XREG)
  214. /* lhi %r12,0 */
  215. EMIT4(0xa7c80000);
  216. /* Clear A if the first register does not set it. */
  217. switch (filter[0].code) {
  218. case BPF_S_LD_W_ABS:
  219. case BPF_S_LD_H_ABS:
  220. case BPF_S_LD_B_ABS:
  221. case BPF_S_LD_W_LEN:
  222. case BPF_S_LD_W_IND:
  223. case BPF_S_LD_H_IND:
  224. case BPF_S_LD_B_IND:
  225. case BPF_S_LDX_B_MSH:
  226. case BPF_S_LD_IMM:
  227. case BPF_S_LD_MEM:
  228. case BPF_S_MISC_TXA:
  229. case BPF_S_ANC_PROTOCOL:
  230. case BPF_S_ANC_PKTTYPE:
  231. case BPF_S_ANC_IFINDEX:
  232. case BPF_S_ANC_MARK:
  233. case BPF_S_ANC_QUEUE:
  234. case BPF_S_ANC_HATYPE:
  235. case BPF_S_ANC_RXHASH:
  236. case BPF_S_ANC_CPU:
  237. case BPF_S_RET_K:
  238. /* first instruction sets A register */
  239. break;
  240. default: /* A = 0 */
  241. /* lhi %r5,0 */
  242. EMIT4(0xa7580000);
  243. }
  244. }
  245. static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
  246. unsigned int *addrs, int i, int last)
  247. {
  248. unsigned int K;
  249. int offset;
  250. unsigned int mask;
  251. K = filter->k;
  252. switch (filter->code) {
  253. case BPF_S_ALU_ADD_X: /* A += X */
  254. jit->seen |= SEEN_XREG;
  255. /* ar %r5,%r12 */
  256. EMIT2(0x1a5c);
  257. break;
  258. case BPF_S_ALU_ADD_K: /* A += K */
  259. if (!K)
  260. break;
  261. if (K <= 16383)
  262. /* ahi %r5,<K> */
  263. EMIT4_IMM(0xa75a0000, K);
  264. else if (test_facility(21))
  265. /* alfi %r5,<K> */
  266. EMIT6_IMM(0xc25b0000, K);
  267. else
  268. /* a %r5,<d(K)>(%r13) */
  269. EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
  270. break;
  271. case BPF_S_ALU_SUB_X: /* A -= X */
  272. jit->seen |= SEEN_XREG;
  273. /* sr %r5,%r12 */
  274. EMIT2(0x1b5c);
  275. break;
  276. case BPF_S_ALU_SUB_K: /* A -= K */
  277. if (!K)
  278. break;
  279. if (K <= 16384)
  280. /* ahi %r5,-K */
  281. EMIT4_IMM(0xa75a0000, -K);
  282. else if (test_facility(21))
  283. /* alfi %r5,-K */
  284. EMIT6_IMM(0xc25b0000, -K);
  285. else
  286. /* s %r5,<d(K)>(%r13) */
  287. EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
  288. break;
  289. case BPF_S_ALU_MUL_X: /* A *= X */
  290. jit->seen |= SEEN_XREG;
  291. /* msr %r5,%r12 */
  292. EMIT4(0xb252005c);
  293. break;
  294. case BPF_S_ALU_MUL_K: /* A *= K */
  295. if (K <= 16383)
  296. /* mhi %r5,K */
  297. EMIT4_IMM(0xa75c0000, K);
  298. else if (test_facility(34))
  299. /* msfi %r5,<K> */
  300. EMIT6_IMM(0xc2510000, K);
  301. else
  302. /* ms %r5,<d(K)>(%r13) */
  303. EMIT4_DISP(0x7150d000, EMIT_CONST(K));
  304. break;
  305. case BPF_S_ALU_DIV_X: /* A /= X */
  306. jit->seen |= SEEN_XREG | SEEN_RET0;
  307. /* ltr %r12,%r12 */
  308. EMIT2(0x12cc);
  309. /* jz <ret0> */
  310. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  311. /* lhi %r4,0 */
  312. EMIT4(0xa7480000);
  313. /* dr %r4,%r12 */
  314. EMIT2(0x1d4c);
  315. break;
  316. case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
  317. /* m %r4,<d(K)>(%r13) */
  318. EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
  319. /* lr %r5,%r4 */
  320. EMIT2(0x1854);
  321. break;
  322. case BPF_S_ALU_MOD_X: /* A %= X */
  323. jit->seen |= SEEN_XREG | SEEN_RET0;
  324. /* ltr %r12,%r12 */
  325. EMIT2(0x12cc);
  326. /* jz <ret0> */
  327. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  328. /* lhi %r4,0 */
  329. EMIT4(0xa7480000);
  330. /* dr %r4,%r12 */
  331. EMIT2(0x1d4c);
  332. /* lr %r5,%r4 */
  333. EMIT2(0x1854);
  334. break;
  335. case BPF_S_ALU_MOD_K: /* A %= K */
  336. /* lhi %r4,0 */
  337. EMIT4(0xa7480000);
  338. /* d %r4,<d(K)>(%r13) */
  339. EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
  340. /* lr %r5,%r4 */
  341. EMIT2(0x1854);
  342. break;
  343. case BPF_S_ALU_AND_X: /* A &= X */
  344. jit->seen |= SEEN_XREG;
  345. /* nr %r5,%r12 */
  346. EMIT2(0x145c);
  347. break;
  348. case BPF_S_ALU_AND_K: /* A &= K */
  349. if (test_facility(21))
  350. /* nilf %r5,<K> */
  351. EMIT6_IMM(0xc05b0000, K);
  352. else
  353. /* n %r5,<d(K)>(%r13) */
  354. EMIT4_DISP(0x5450d000, EMIT_CONST(K));
  355. break;
  356. case BPF_S_ALU_OR_X: /* A |= X */
  357. jit->seen |= SEEN_XREG;
  358. /* or %r5,%r12 */
  359. EMIT2(0x165c);
  360. break;
  361. case BPF_S_ALU_OR_K: /* A |= K */
  362. if (test_facility(21))
  363. /* oilf %r5,<K> */
  364. EMIT6_IMM(0xc05d0000, K);
  365. else
  366. /* o %r5,<d(K)>(%r13) */
  367. EMIT4_DISP(0x5650d000, EMIT_CONST(K));
  368. break;
  369. case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
  370. case BPF_S_ALU_XOR_X:
  371. jit->seen |= SEEN_XREG;
  372. /* xr %r5,%r12 */
  373. EMIT2(0x175c);
  374. break;
  375. case BPF_S_ALU_XOR_K: /* A ^= K */
  376. if (!K)
  377. break;
  378. /* x %r5,<d(K)>(%r13) */
  379. EMIT4_DISP(0x5750d000, EMIT_CONST(K));
  380. break;
  381. case BPF_S_ALU_LSH_X: /* A <<= X; */
  382. jit->seen |= SEEN_XREG;
  383. /* sll %r5,0(%r12) */
  384. EMIT4(0x8950c000);
  385. break;
  386. case BPF_S_ALU_LSH_K: /* A <<= K */
  387. if (K == 0)
  388. break;
  389. /* sll %r5,K */
  390. EMIT4_DISP(0x89500000, K);
  391. break;
  392. case BPF_S_ALU_RSH_X: /* A >>= X; */
  393. jit->seen |= SEEN_XREG;
  394. /* srl %r5,0(%r12) */
  395. EMIT4(0x8850c000);
  396. break;
  397. case BPF_S_ALU_RSH_K: /* A >>= K; */
  398. if (K == 0)
  399. break;
  400. /* srl %r5,K */
  401. EMIT4_DISP(0x88500000, K);
  402. break;
  403. case BPF_S_ALU_NEG: /* A = -A */
  404. /* lnr %r5,%r5 */
  405. EMIT2(0x1155);
  406. break;
  407. case BPF_S_JMP_JA: /* ip += K */
  408. offset = addrs[i + K] + jit->start - jit->prg;
  409. EMIT4_PCREL(0xa7f40000, offset);
  410. break;
  411. case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
  412. mask = 0x200000; /* jh */
  413. goto kbranch;
  414. case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
  415. mask = 0xa00000; /* jhe */
  416. goto kbranch;
  417. case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
  418. mask = 0x800000; /* je */
  419. kbranch: /* Emit compare if the branch targets are different */
  420. if (filter->jt != filter->jf) {
  421. if (K <= 16383)
  422. /* chi %r5,<K> */
  423. EMIT4_IMM(0xa75e0000, K);
  424. else if (test_facility(21))
  425. /* clfi %r5,<K> */
  426. EMIT6_IMM(0xc25f0000, K);
  427. else
  428. /* c %r5,<d(K)>(%r13) */
  429. EMIT4_DISP(0x5950d000, EMIT_CONST(K));
  430. }
  431. branch: if (filter->jt == filter->jf) {
  432. if (filter->jt == 0)
  433. break;
  434. /* j <jt> */
  435. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  436. EMIT4_PCREL(0xa7f40000, offset);
  437. break;
  438. }
  439. if (filter->jt != 0) {
  440. /* brc <mask>,<jt> */
  441. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  442. EMIT4_PCREL(0xa7040000 | mask, offset);
  443. }
  444. if (filter->jf != 0) {
  445. /* brc <mask^15>,<jf> */
  446. offset = addrs[i + filter->jf] + jit->start - jit->prg;
  447. EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
  448. }
  449. break;
  450. case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
  451. mask = 0x700000; /* jnz */
  452. /* Emit test if the branch targets are different */
  453. if (filter->jt != filter->jf) {
  454. if (K > 65535) {
  455. /* lr %r4,%r5 */
  456. EMIT2(0x1845);
  457. /* n %r4,<d(K)>(%r13) */
  458. EMIT4_DISP(0x5440d000, EMIT_CONST(K));
  459. } else
  460. /* tmll %r5,K */
  461. EMIT4_IMM(0xa7510000, K);
  462. }
  463. goto branch;
  464. case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
  465. mask = 0x200000; /* jh */
  466. goto xbranch;
  467. case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
  468. mask = 0xa00000; /* jhe */
  469. goto xbranch;
  470. case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
  471. mask = 0x800000; /* je */
  472. xbranch: /* Emit compare if the branch targets are different */
  473. if (filter->jt != filter->jf) {
  474. jit->seen |= SEEN_XREG;
  475. /* cr %r5,%r12 */
  476. EMIT2(0x195c);
  477. }
  478. goto branch;
  479. case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
  480. mask = 0x700000; /* jnz */
  481. /* Emit test if the branch targets are different */
  482. if (filter->jt != filter->jf) {
  483. jit->seen |= SEEN_XREG;
  484. /* lr %r4,%r5 */
  485. EMIT2(0x1845);
  486. /* nr %r4,%r12 */
  487. EMIT2(0x144c);
  488. }
  489. goto branch;
  490. case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
  491. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
  492. offset = jit->off_load_word;
  493. goto load_abs;
  494. case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
  495. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
  496. offset = jit->off_load_half;
  497. goto load_abs;
  498. case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
  499. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
  500. offset = jit->off_load_byte;
  501. load_abs: if ((int) K < 0)
  502. goto out;
  503. call_fn: /* lg %r1,<d(function)>(%r13) */
  504. EMIT6_DISP(0xe310d000, 0x0004, offset);
  505. /* l %r3,<d(K)>(%r13) */
  506. EMIT4_DISP(0x5830d000, EMIT_CONST(K));
  507. /* basr %r8,%r1 */
  508. EMIT2(0x0d81);
  509. /* jnz <ret0> */
  510. EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
  511. break;
  512. case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
  513. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
  514. offset = jit->off_load_iword;
  515. goto call_fn;
  516. case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
  517. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
  518. offset = jit->off_load_ihalf;
  519. goto call_fn;
  520. case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
  521. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
  522. offset = jit->off_load_ibyte;
  523. goto call_fn;
  524. case BPF_S_LDX_B_MSH:
  525. /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
  526. jit->seen |= SEEN_RET0;
  527. if ((int) K < 0) {
  528. /* j <ret0> */
  529. EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
  530. break;
  531. }
  532. jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
  533. offset = jit->off_load_bmsh;
  534. goto call_fn;
  535. case BPF_S_LD_W_LEN: /* A = skb->len; */
  536. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
  537. /* l %r5,<d(len)>(%r2) */
  538. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
  539. break;
  540. case BPF_S_LDX_W_LEN: /* X = skb->len; */
  541. jit->seen |= SEEN_XREG;
  542. /* l %r12,<d(len)>(%r2) */
  543. EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
  544. break;
  545. case BPF_S_LD_IMM: /* A = K */
  546. if (K <= 16383)
  547. /* lhi %r5,K */
  548. EMIT4_IMM(0xa7580000, K);
  549. else if (test_facility(21))
  550. /* llilf %r5,<K> */
  551. EMIT6_IMM(0xc05f0000, K);
  552. else
  553. /* l %r5,<d(K)>(%r13) */
  554. EMIT4_DISP(0x5850d000, EMIT_CONST(K));
  555. break;
  556. case BPF_S_LDX_IMM: /* X = K */
  557. jit->seen |= SEEN_XREG;
  558. if (K <= 16383)
  559. /* lhi %r12,<K> */
  560. EMIT4_IMM(0xa7c80000, K);
  561. else if (test_facility(21))
  562. /* llilf %r12,<K> */
  563. EMIT6_IMM(0xc0cf0000, K);
  564. else
  565. /* l %r12,<d(K)>(%r13) */
  566. EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
  567. break;
  568. case BPF_S_LD_MEM: /* A = mem[K] */
  569. jit->seen |= SEEN_MEM;
  570. /* l %r5,<K>(%r15) */
  571. EMIT4_DISP(0x5850f000,
  572. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  573. break;
  574. case BPF_S_LDX_MEM: /* X = mem[K] */
  575. jit->seen |= SEEN_XREG | SEEN_MEM;
  576. /* l %r12,<K>(%r15) */
  577. EMIT4_DISP(0x58c0f000,
  578. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  579. break;
  580. case BPF_S_MISC_TAX: /* X = A */
  581. jit->seen |= SEEN_XREG;
  582. /* lr %r12,%r5 */
  583. EMIT2(0x18c5);
  584. break;
  585. case BPF_S_MISC_TXA: /* A = X */
  586. jit->seen |= SEEN_XREG;
  587. /* lr %r5,%r12 */
  588. EMIT2(0x185c);
  589. break;
  590. case BPF_S_RET_K:
  591. if (K == 0) {
  592. jit->seen |= SEEN_RET0;
  593. if (last)
  594. break;
  595. /* j <ret0> */
  596. EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
  597. } else {
  598. if (K <= 16383)
  599. /* lghi %r2,K */
  600. EMIT4_IMM(0xa7290000, K);
  601. else
  602. /* llgf %r2,<K>(%r13) */
  603. EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
  604. /* j <exit> */
  605. if (last && !(jit->seen & SEEN_RET0))
  606. break;
  607. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  608. }
  609. break;
  610. case BPF_S_RET_A:
  611. /* llgfr %r2,%r5 */
  612. EMIT4(0xb9160025);
  613. /* j <exit> */
  614. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  615. break;
  616. case BPF_S_ST: /* mem[K] = A */
  617. jit->seen |= SEEN_MEM;
  618. /* st %r5,<K>(%r15) */
  619. EMIT4_DISP(0x5050f000,
  620. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  621. break;
  622. case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
  623. jit->seen |= SEEN_XREG | SEEN_MEM;
  624. /* st %r12,<K>(%r15) */
  625. EMIT4_DISP(0x50c0f000,
  626. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  627. break;
  628. case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
  629. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
  630. /* lhi %r5,0 */
  631. EMIT4(0xa7580000);
  632. /* icm %r5,3,<d(protocol)>(%r2) */
  633. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
  634. break;
  635. case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
  636. * A = skb->dev->ifindex */
  637. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
  638. jit->seen |= SEEN_RET0;
  639. /* lg %r1,<d(dev)>(%r2) */
  640. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  641. /* ltgr %r1,%r1 */
  642. EMIT4(0xb9020011);
  643. /* jz <ret0> */
  644. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  645. /* l %r5,<d(ifindex)>(%r1) */
  646. EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
  647. break;
  648. case BPF_S_ANC_MARK: /* A = skb->mark */
  649. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
  650. /* l %r5,<d(mark)>(%r2) */
  651. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
  652. break;
  653. case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
  654. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
  655. /* lhi %r5,0 */
  656. EMIT4(0xa7580000);
  657. /* icm %r5,3,<d(queue_mapping)>(%r2) */
  658. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
  659. break;
  660. case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
  661. * A = skb->dev->type */
  662. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
  663. jit->seen |= SEEN_RET0;
  664. /* lg %r1,<d(dev)>(%r2) */
  665. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  666. /* ltgr %r1,%r1 */
  667. EMIT4(0xb9020011);
  668. /* jz <ret0> */
  669. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  670. /* lhi %r5,0 */
  671. EMIT4(0xa7580000);
  672. /* icm %r5,3,<d(type)>(%r1) */
  673. EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
  674. break;
  675. case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
  676. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
  677. /* l %r5,<d(rxhash)>(%r2) */
  678. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
  679. break;
  680. case BPF_S_ANC_CPU: /* A = smp_processor_id() */
  681. #ifdef CONFIG_SMP
  682. /* l %r5,<d(cpu_nr)> */
  683. EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
  684. #else
  685. /* lhi %r5,0 */
  686. EMIT4(0xa7580000);
  687. #endif
  688. break;
  689. default: /* too complex, give up */
  690. goto out;
  691. }
  692. addrs[i] = jit->prg - jit->start;
  693. return 0;
  694. out:
  695. return -1;
  696. }
  697. void bpf_jit_compile(struct sk_filter *fp)
  698. {
  699. unsigned long size, prg_len, lit_len;
  700. struct bpf_jit jit, cjit;
  701. unsigned int *addrs;
  702. int pass, i;
  703. if (!bpf_jit_enable)
  704. return;
  705. addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
  706. if (addrs == NULL)
  707. return;
  708. memset(addrs, 0, fp->len * sizeof(*addrs));
  709. memset(&jit, 0, sizeof(cjit));
  710. memset(&cjit, 0, sizeof(cjit));
  711. for (pass = 0; pass < 10; pass++) {
  712. jit.prg = jit.start;
  713. jit.lit = jit.mid;
  714. bpf_jit_prologue(&jit);
  715. bpf_jit_noleaks(&jit, fp->insns);
  716. for (i = 0; i < fp->len; i++) {
  717. if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
  718. i == fp->len - 1))
  719. goto out;
  720. }
  721. bpf_jit_epilogue(&jit);
  722. if (jit.start) {
  723. WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
  724. if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
  725. break;
  726. } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
  727. prg_len = jit.prg - jit.start;
  728. lit_len = jit.lit - jit.mid;
  729. size = max_t(unsigned long, prg_len + lit_len,
  730. sizeof(struct work_struct));
  731. if (size >= BPF_SIZE_MAX)
  732. goto out;
  733. jit.start = module_alloc(size);
  734. if (!jit.start)
  735. goto out;
  736. jit.prg = jit.mid = jit.start + prg_len;
  737. jit.lit = jit.end = jit.start + prg_len + lit_len;
  738. jit.base_ip += (unsigned long) jit.start;
  739. jit.exit_ip += (unsigned long) jit.start;
  740. jit.ret0_ip += (unsigned long) jit.start;
  741. }
  742. cjit = jit;
  743. }
  744. if (bpf_jit_enable > 1) {
  745. pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
  746. fp->len, jit.end - jit.start, pass, jit.start);
  747. if (jit.start) {
  748. printk(KERN_ERR "JIT code:\n");
  749. print_fn_code(jit.start, jit.mid - jit.start);
  750. print_hex_dump(KERN_ERR, "JIT literals:\n",
  751. DUMP_PREFIX_ADDRESS, 16, 1,
  752. jit.mid, jit.end - jit.mid, false);
  753. }
  754. }
  755. if (jit.start)
  756. fp->bpf_func = (void *) jit.start;
  757. out:
  758. kfree(addrs);
  759. }
  760. static void jit_free_defer(struct work_struct *arg)
  761. {
  762. module_free(NULL, arg);
  763. }
  764. /* run from softirq, we must use a work_struct to call
  765. * module_free() from process context
  766. */
  767. void bpf_jit_free(struct sk_filter *fp)
  768. {
  769. struct work_struct *work;
  770. if (fp->bpf_func == sk_run_filter)
  771. return;
  772. work = (struct work_struct *)fp->bpf_func;
  773. INIT_WORK(work, jit_free_defer);
  774. schedule_work(work);
  775. }