bpf_jit_comp.c 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776
  1. /*
  2. * BPF Jit compiler for s390.
  3. *
  4. * Copyright IBM Corp. 2012
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/moduleloader.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/filter.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/processor.h>
  13. #include <asm/facility.h>
  14. /*
  15. * Conventions:
  16. * %r2 = skb pointer
  17. * %r3 = offset parameter
  18. * %r4 = scratch register / length parameter
  19. * %r5 = BPF A accumulator
  20. * %r8 = return address
  21. * %r9 = save register for skb pointer
  22. * %r10 = skb->data
  23. * %r11 = skb->len - skb->data_len (headlen)
  24. * %r12 = BPF X accumulator
  25. * %r13 = literal pool pointer
  26. * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
  27. */
  28. int bpf_jit_enable __read_mostly;
  29. /*
  30. * assembly code in arch/x86/net/bpf_jit.S
  31. */
  32. extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
  33. extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
  34. struct bpf_jit {
  35. unsigned int seen;
  36. u8 *start;
  37. u8 *prg;
  38. u8 *mid;
  39. u8 *lit;
  40. u8 *end;
  41. u8 *base_ip;
  42. u8 *ret0_ip;
  43. u8 *exit_ip;
  44. unsigned int off_load_word;
  45. unsigned int off_load_half;
  46. unsigned int off_load_byte;
  47. unsigned int off_load_bmsh;
  48. unsigned int off_load_iword;
  49. unsigned int off_load_ihalf;
  50. unsigned int off_load_ibyte;
  51. };
  52. #define BPF_SIZE_MAX 4096 /* Max size for program */
  53. #define SEEN_DATAREF 1 /* might call external helpers */
  54. #define SEEN_XREG 2 /* ebx is used */
  55. #define SEEN_MEM 4 /* use mem[] for temporary storage */
  56. #define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
  57. #define SEEN_LITERAL 16 /* code uses literals */
  58. #define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
  59. #define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
  60. #define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
  61. #define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
  62. #define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
  63. #define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
  64. #define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
  65. #define EMIT2(op) \
  66. ({ \
  67. if (jit->prg + 2 <= jit->mid) \
  68. *(u16 *) jit->prg = op; \
  69. jit->prg += 2; \
  70. })
  71. #define EMIT4(op) \
  72. ({ \
  73. if (jit->prg + 4 <= jit->mid) \
  74. *(u32 *) jit->prg = op; \
  75. jit->prg += 4; \
  76. })
  77. #define EMIT4_DISP(op, disp) \
  78. ({ \
  79. unsigned int __disp = (disp) & 0xfff; \
  80. EMIT4(op | __disp); \
  81. })
  82. #define EMIT4_IMM(op, imm) \
  83. ({ \
  84. unsigned int __imm = (imm) & 0xffff; \
  85. EMIT4(op | __imm); \
  86. })
  87. #define EMIT4_PCREL(op, pcrel) \
  88. ({ \
  89. long __pcrel = ((pcrel) >> 1) & 0xffff; \
  90. EMIT4(op | __pcrel); \
  91. })
  92. #define EMIT6(op1, op2) \
  93. ({ \
  94. if (jit->prg + 6 <= jit->mid) { \
  95. *(u32 *) jit->prg = op1; \
  96. *(u16 *) (jit->prg + 4) = op2; \
  97. } \
  98. jit->prg += 6; \
  99. })
  100. #define EMIT6_DISP(op1, op2, disp) \
  101. ({ \
  102. unsigned int __disp = (disp) & 0xfff; \
  103. EMIT6(op1 | __disp, op2); \
  104. })
  105. #define EMIT6_IMM(op, imm) \
  106. ({ \
  107. unsigned int __imm = (imm); \
  108. EMIT6(op | (__imm >> 16), __imm & 0xffff); \
  109. })
  110. #define EMIT_CONST(val) \
  111. ({ \
  112. unsigned int ret; \
  113. ret = (unsigned int) (jit->lit - jit->base_ip); \
  114. jit->seen |= SEEN_LITERAL; \
  115. if (jit->lit + 4 <= jit->end) \
  116. *(u32 *) jit->lit = val; \
  117. jit->lit += 4; \
  118. ret; \
  119. })
  120. #define EMIT_FN_CONST(bit, fn) \
  121. ({ \
  122. unsigned int ret; \
  123. ret = (unsigned int) (jit->lit - jit->base_ip); \
  124. if (jit->seen & bit) { \
  125. jit->seen |= SEEN_LITERAL; \
  126. if (jit->lit + 8 <= jit->end) \
  127. *(void **) jit->lit = fn; \
  128. jit->lit += 8; \
  129. } \
  130. ret; \
  131. })
  132. static void bpf_jit_prologue(struct bpf_jit *jit)
  133. {
  134. /* Save registers and create stack frame if necessary */
  135. if (jit->seen & SEEN_DATAREF) {
  136. /* stmg %r8,%r15,88(%r15) */
  137. EMIT6(0xeb8ff058, 0x0024);
  138. /* lgr %r14,%r15 */
  139. EMIT4(0xb90400ef);
  140. /* ahi %r15,<offset> */
  141. EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
  142. /* stg %r14,152(%r15) */
  143. EMIT6(0xe3e0f098, 0x0024);
  144. } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  145. /* stmg %r12,%r13,120(%r15) */
  146. EMIT6(0xebcdf078, 0x0024);
  147. else if (jit->seen & SEEN_XREG)
  148. /* stg %r12,120(%r15) */
  149. EMIT6(0xe3c0f078, 0x0024);
  150. else if (jit->seen & SEEN_LITERAL)
  151. /* stg %r13,128(%r15) */
  152. EMIT6(0xe3d0f080, 0x0024);
  153. /* Setup literal pool */
  154. if (jit->seen & SEEN_LITERAL) {
  155. /* basr %r13,0 */
  156. EMIT2(0x0dd0);
  157. jit->base_ip = jit->prg;
  158. }
  159. jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
  160. jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
  161. jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
  162. jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
  163. jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
  164. jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
  165. jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
  166. /* Filter needs to access skb data */
  167. if (jit->seen & SEEN_DATAREF) {
  168. /* l %r11,<len>(%r2) */
  169. EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
  170. /* s %r11,<data_len>(%r2) */
  171. EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
  172. /* lg %r10,<data>(%r2) */
  173. EMIT6_DISP(0xe3a02000, 0x0004,
  174. offsetof(struct sk_buff, data));
  175. }
  176. }
  177. static void bpf_jit_epilogue(struct bpf_jit *jit)
  178. {
  179. /* Return 0 */
  180. if (jit->seen & SEEN_RET0) {
  181. jit->ret0_ip = jit->prg;
  182. /* lghi %r2,0 */
  183. EMIT4(0xa7290000);
  184. }
  185. jit->exit_ip = jit->prg;
  186. /* Restore registers */
  187. if (jit->seen & SEEN_DATAREF)
  188. /* lmg %r8,%r15,<offset>(%r15) */
  189. EMIT6_DISP(0xeb8ff000, 0x0004,
  190. (jit->seen & SEEN_MEM) ? 200 : 168);
  191. else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  192. /* lmg %r12,%r13,120(%r15) */
  193. EMIT6(0xebcdf078, 0x0004);
  194. else if (jit->seen & SEEN_XREG)
  195. /* lg %r12,120(%r15) */
  196. EMIT6(0xe3c0f078, 0x0004);
  197. else if (jit->seen & SEEN_LITERAL)
  198. /* lg %r13,128(%r15) */
  199. EMIT6(0xe3d0f080, 0x0004);
  200. /* br %r14 */
  201. EMIT2(0x07fe);
  202. }
  203. /*
  204. * make sure we dont leak kernel information to user
  205. */
  206. static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
  207. {
  208. /* Clear temporary memory if (seen & SEEN_MEM) */
  209. if (jit->seen & SEEN_MEM)
  210. /* xc 0(64,%r15),0(%r15) */
  211. EMIT6(0xd73ff000, 0xf000);
  212. /* Clear X if (seen & SEEN_XREG) */
  213. if (jit->seen & SEEN_XREG)
  214. /* lhi %r12,0 */
  215. EMIT4(0xa7c80000);
  216. /* Clear A if the first register does not set it. */
  217. switch (filter[0].code) {
  218. case BPF_S_LD_W_ABS:
  219. case BPF_S_LD_H_ABS:
  220. case BPF_S_LD_B_ABS:
  221. case BPF_S_LD_W_LEN:
  222. case BPF_S_LD_W_IND:
  223. case BPF_S_LD_H_IND:
  224. case BPF_S_LD_B_IND:
  225. case BPF_S_LDX_B_MSH:
  226. case BPF_S_LD_IMM:
  227. case BPF_S_LD_MEM:
  228. case BPF_S_MISC_TXA:
  229. case BPF_S_ANC_PROTOCOL:
  230. case BPF_S_ANC_PKTTYPE:
  231. case BPF_S_ANC_IFINDEX:
  232. case BPF_S_ANC_MARK:
  233. case BPF_S_ANC_QUEUE:
  234. case BPF_S_ANC_HATYPE:
  235. case BPF_S_ANC_RXHASH:
  236. case BPF_S_ANC_CPU:
  237. case BPF_S_RET_K:
  238. /* first instruction sets A register */
  239. break;
  240. default: /* A = 0 */
  241. /* lhi %r5,0 */
  242. EMIT4(0xa7580000);
  243. }
  244. }
  245. static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
  246. unsigned int *addrs, int i, int last)
  247. {
  248. unsigned int K;
  249. int offset;
  250. unsigned int mask;
  251. K = filter->k;
  252. switch (filter->code) {
  253. case BPF_S_ALU_ADD_X: /* A += X */
  254. jit->seen |= SEEN_XREG;
  255. /* ar %r5,%r12 */
  256. EMIT2(0x1a5c);
  257. break;
  258. case BPF_S_ALU_ADD_K: /* A += K */
  259. if (!K)
  260. break;
  261. if (K <= 16383)
  262. /* ahi %r5,<K> */
  263. EMIT4_IMM(0xa75a0000, K);
  264. else if (test_facility(21))
  265. /* alfi %r5,<K> */
  266. EMIT6_IMM(0xc25b0000, K);
  267. else
  268. /* a %r5,<d(K)>(%r13) */
  269. EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
  270. break;
  271. case BPF_S_ALU_SUB_X: /* A -= X */
  272. jit->seen |= SEEN_XREG;
  273. /* sr %r5,%r12 */
  274. EMIT2(0x1b5c);
  275. break;
  276. case BPF_S_ALU_SUB_K: /* A -= K */
  277. if (!K)
  278. break;
  279. if (K <= 16384)
  280. /* ahi %r5,-K */
  281. EMIT4_IMM(0xa75a0000, -K);
  282. else if (test_facility(21))
  283. /* alfi %r5,-K */
  284. EMIT6_IMM(0xc25b0000, -K);
  285. else
  286. /* s %r5,<d(K)>(%r13) */
  287. EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
  288. break;
  289. case BPF_S_ALU_MUL_X: /* A *= X */
  290. jit->seen |= SEEN_XREG;
  291. /* msr %r5,%r12 */
  292. EMIT4(0xb252005c);
  293. break;
  294. case BPF_S_ALU_MUL_K: /* A *= K */
  295. if (K <= 16383)
  296. /* mhi %r5,K */
  297. EMIT4_IMM(0xa75c0000, K);
  298. else if (test_facility(34))
  299. /* msfi %r5,<K> */
  300. EMIT6_IMM(0xc2510000, K);
  301. else
  302. /* ms %r5,<d(K)>(%r13) */
  303. EMIT4_DISP(0x7150d000, EMIT_CONST(K));
  304. break;
  305. case BPF_S_ALU_DIV_X: /* A /= X */
  306. jit->seen |= SEEN_XREG | SEEN_RET0;
  307. /* ltr %r12,%r12 */
  308. EMIT2(0x12cc);
  309. /* jz <ret0> */
  310. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  311. /* lhi %r4,0 */
  312. EMIT4(0xa7480000);
  313. /* dr %r4,%r12 */
  314. EMIT2(0x1d4c);
  315. break;
  316. case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
  317. /* m %r4,<d(K)>(%r13) */
  318. EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
  319. /* lr %r5,%r4 */
  320. EMIT2(0x1854);
  321. break;
  322. case BPF_S_ALU_AND_X: /* A &= X */
  323. jit->seen |= SEEN_XREG;
  324. /* nr %r5,%r12 */
  325. EMIT2(0x145c);
  326. break;
  327. case BPF_S_ALU_AND_K: /* A &= K */
  328. if (test_facility(21))
  329. /* nilf %r5,<K> */
  330. EMIT6_IMM(0xc05b0000, K);
  331. else
  332. /* n %r5,<d(K)>(%r13) */
  333. EMIT4_DISP(0x5450d000, EMIT_CONST(K));
  334. break;
  335. case BPF_S_ALU_OR_X: /* A |= X */
  336. jit->seen |= SEEN_XREG;
  337. /* or %r5,%r12 */
  338. EMIT2(0x165c);
  339. break;
  340. case BPF_S_ALU_OR_K: /* A |= K */
  341. if (test_facility(21))
  342. /* oilf %r5,<K> */
  343. EMIT6_IMM(0xc05d0000, K);
  344. else
  345. /* o %r5,<d(K)>(%r13) */
  346. EMIT4_DISP(0x5650d000, EMIT_CONST(K));
  347. break;
  348. case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
  349. jit->seen |= SEEN_XREG;
  350. /* xr %r5,%r12 */
  351. EMIT2(0x175c);
  352. break;
  353. case BPF_S_ALU_LSH_X: /* A <<= X; */
  354. jit->seen |= SEEN_XREG;
  355. /* sll %r5,0(%r12) */
  356. EMIT4(0x8950c000);
  357. break;
  358. case BPF_S_ALU_LSH_K: /* A <<= K */
  359. if (K == 0)
  360. break;
  361. /* sll %r5,K */
  362. EMIT4_DISP(0x89500000, K);
  363. break;
  364. case BPF_S_ALU_RSH_X: /* A >>= X; */
  365. jit->seen |= SEEN_XREG;
  366. /* srl %r5,0(%r12) */
  367. EMIT4(0x8850c000);
  368. break;
  369. case BPF_S_ALU_RSH_K: /* A >>= K; */
  370. if (K == 0)
  371. break;
  372. /* srl %r5,K */
  373. EMIT4_DISP(0x88500000, K);
  374. break;
  375. case BPF_S_ALU_NEG: /* A = -A */
  376. /* lnr %r5,%r5 */
  377. EMIT2(0x1155);
  378. break;
  379. case BPF_S_JMP_JA: /* ip += K */
  380. offset = addrs[i + K] + jit->start - jit->prg;
  381. EMIT4_PCREL(0xa7f40000, offset);
  382. break;
  383. case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
  384. mask = 0x200000; /* jh */
  385. goto kbranch;
  386. case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
  387. mask = 0xa00000; /* jhe */
  388. goto kbranch;
  389. case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
  390. mask = 0x800000; /* je */
  391. kbranch: /* Emit compare if the branch targets are different */
  392. if (filter->jt != filter->jf) {
  393. if (K <= 16383)
  394. /* chi %r5,<K> */
  395. EMIT4_IMM(0xa75e0000, K);
  396. else if (test_facility(21))
  397. /* clfi %r5,<K> */
  398. EMIT6_IMM(0xc25f0000, K);
  399. else
  400. /* c %r5,<d(K)>(%r13) */
  401. EMIT4_DISP(0x5950d000, EMIT_CONST(K));
  402. }
  403. branch: if (filter->jt == filter->jf) {
  404. if (filter->jt == 0)
  405. break;
  406. /* j <jt> */
  407. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  408. EMIT4_PCREL(0xa7f40000, offset);
  409. break;
  410. }
  411. if (filter->jt != 0) {
  412. /* brc <mask>,<jt> */
  413. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  414. EMIT4_PCREL(0xa7040000 | mask, offset);
  415. }
  416. if (filter->jf != 0) {
  417. /* brc <mask^15>,<jf> */
  418. offset = addrs[i + filter->jf] + jit->start - jit->prg;
  419. EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
  420. }
  421. break;
  422. case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
  423. mask = 0x700000; /* jnz */
  424. /* Emit test if the branch targets are different */
  425. if (filter->jt != filter->jf) {
  426. if (K > 65535) {
  427. /* lr %r4,%r5 */
  428. EMIT2(0x1845);
  429. /* n %r4,<d(K)>(%r13) */
  430. EMIT4_DISP(0x5440d000, EMIT_CONST(K));
  431. } else
  432. /* tmll %r5,K */
  433. EMIT4_IMM(0xa7510000, K);
  434. }
  435. goto branch;
  436. case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
  437. mask = 0x200000; /* jh */
  438. goto xbranch;
  439. case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
  440. mask = 0xa00000; /* jhe */
  441. goto xbranch;
  442. case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
  443. mask = 0x800000; /* je */
  444. xbranch: /* Emit compare if the branch targets are different */
  445. if (filter->jt != filter->jf) {
  446. jit->seen |= SEEN_XREG;
  447. /* cr %r5,%r12 */
  448. EMIT2(0x195c);
  449. }
  450. goto branch;
  451. case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
  452. mask = 0x700000; /* jnz */
  453. /* Emit test if the branch targets are different */
  454. if (filter->jt != filter->jf) {
  455. jit->seen |= SEEN_XREG;
  456. /* lr %r4,%r5 */
  457. EMIT2(0x1845);
  458. /* nr %r4,%r12 */
  459. EMIT2(0x144c);
  460. }
  461. goto branch;
  462. case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
  463. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
  464. offset = jit->off_load_word;
  465. goto load_abs;
  466. case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
  467. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
  468. offset = jit->off_load_half;
  469. goto load_abs;
  470. case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
  471. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
  472. offset = jit->off_load_byte;
  473. load_abs: if ((int) K < 0)
  474. goto out;
  475. call_fn: /* lg %r1,<d(function)>(%r13) */
  476. EMIT6_DISP(0xe310d000, 0x0004, offset);
  477. /* l %r3,<d(K)>(%r13) */
  478. EMIT4_DISP(0x5830d000, EMIT_CONST(K));
  479. /* basr %r8,%r1 */
  480. EMIT2(0x0d81);
  481. /* jnz <ret0> */
  482. EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
  483. break;
  484. case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
  485. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
  486. offset = jit->off_load_iword;
  487. goto call_fn;
  488. case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
  489. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
  490. offset = jit->off_load_ihalf;
  491. goto call_fn;
  492. case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
  493. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
  494. offset = jit->off_load_ibyte;
  495. goto call_fn;
  496. case BPF_S_LDX_B_MSH:
  497. /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
  498. jit->seen |= SEEN_RET0;
  499. if ((int) K < 0) {
  500. /* j <ret0> */
  501. EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
  502. break;
  503. }
  504. jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
  505. offset = jit->off_load_bmsh;
  506. goto call_fn;
  507. case BPF_S_LD_W_LEN: /* A = skb->len; */
  508. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
  509. /* l %r5,<d(len)>(%r2) */
  510. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
  511. break;
  512. case BPF_S_LDX_W_LEN: /* X = skb->len; */
  513. jit->seen |= SEEN_XREG;
  514. /* l %r12,<d(len)>(%r2) */
  515. EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
  516. break;
  517. case BPF_S_LD_IMM: /* A = K */
  518. if (K <= 16383)
  519. /* lhi %r5,K */
  520. EMIT4_IMM(0xa7580000, K);
  521. else if (test_facility(21))
  522. /* llilf %r5,<K> */
  523. EMIT6_IMM(0xc05f0000, K);
  524. else
  525. /* l %r5,<d(K)>(%r13) */
  526. EMIT4_DISP(0x5850d000, EMIT_CONST(K));
  527. break;
  528. case BPF_S_LDX_IMM: /* X = K */
  529. jit->seen |= SEEN_XREG;
  530. if (K <= 16383)
  531. /* lhi %r12,<K> */
  532. EMIT4_IMM(0xa7c80000, K);
  533. else if (test_facility(21))
  534. /* llilf %r12,<K> */
  535. EMIT6_IMM(0xc0cf0000, K);
  536. else
  537. /* l %r12,<d(K)>(%r13) */
  538. EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
  539. break;
  540. case BPF_S_LD_MEM: /* A = mem[K] */
  541. jit->seen |= SEEN_MEM;
  542. /* l %r5,<K>(%r15) */
  543. EMIT4_DISP(0x5850f000,
  544. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  545. break;
  546. case BPF_S_LDX_MEM: /* X = mem[K] */
  547. jit->seen |= SEEN_XREG | SEEN_MEM;
  548. /* l %r12,<K>(%r15) */
  549. EMIT4_DISP(0x58c0f000,
  550. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  551. break;
  552. case BPF_S_MISC_TAX: /* X = A */
  553. jit->seen |= SEEN_XREG;
  554. /* lr %r12,%r5 */
  555. EMIT2(0x18c5);
  556. break;
  557. case BPF_S_MISC_TXA: /* A = X */
  558. jit->seen |= SEEN_XREG;
  559. /* lr %r5,%r12 */
  560. EMIT2(0x185c);
  561. break;
  562. case BPF_S_RET_K:
  563. if (K == 0) {
  564. jit->seen |= SEEN_RET0;
  565. if (last)
  566. break;
  567. /* j <ret0> */
  568. EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
  569. } else {
  570. if (K <= 16383)
  571. /* lghi %r2,K */
  572. EMIT4_IMM(0xa7290000, K);
  573. else
  574. /* llgf %r2,<K>(%r13) */
  575. EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
  576. /* j <exit> */
  577. if (last && !(jit->seen & SEEN_RET0))
  578. break;
  579. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  580. }
  581. break;
  582. case BPF_S_RET_A:
  583. /* llgfr %r2,%r5 */
  584. EMIT4(0xb9160025);
  585. /* j <exit> */
  586. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  587. break;
  588. case BPF_S_ST: /* mem[K] = A */
  589. jit->seen |= SEEN_MEM;
  590. /* st %r5,<K>(%r15) */
  591. EMIT4_DISP(0x5050f000,
  592. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  593. break;
  594. case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
  595. jit->seen |= SEEN_XREG | SEEN_MEM;
  596. /* st %r12,<K>(%r15) */
  597. EMIT4_DISP(0x50c0f000,
  598. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  599. break;
  600. case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
  601. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
  602. /* lhi %r5,0 */
  603. EMIT4(0xa7580000);
  604. /* icm %r5,3,<d(protocol)>(%r2) */
  605. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
  606. break;
  607. case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
  608. * A = skb->dev->ifindex */
  609. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
  610. jit->seen |= SEEN_RET0;
  611. /* lg %r1,<d(dev)>(%r2) */
  612. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  613. /* ltgr %r1,%r1 */
  614. EMIT4(0xb9020011);
  615. /* jz <ret0> */
  616. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  617. /* l %r5,<d(ifindex)>(%r1) */
  618. EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
  619. break;
  620. case BPF_S_ANC_MARK: /* A = skb->mark */
  621. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
  622. /* l %r5,<d(mark)>(%r2) */
  623. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
  624. break;
  625. case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
  626. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
  627. /* lhi %r5,0 */
  628. EMIT4(0xa7580000);
  629. /* icm %r5,3,<d(queue_mapping)>(%r2) */
  630. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
  631. break;
  632. case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
  633. * A = skb->dev->type */
  634. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
  635. jit->seen |= SEEN_RET0;
  636. /* lg %r1,<d(dev)>(%r2) */
  637. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  638. /* ltgr %r1,%r1 */
  639. EMIT4(0xb9020011);
  640. /* jz <ret0> */
  641. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  642. /* lhi %r5,0 */
  643. EMIT4(0xa7580000);
  644. /* icm %r5,3,<d(type)>(%r1) */
  645. EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
  646. break;
  647. case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
  648. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
  649. /* l %r5,<d(rxhash)>(%r2) */
  650. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
  651. break;
  652. case BPF_S_ANC_CPU: /* A = smp_processor_id() */
  653. #ifdef CONFIG_SMP
  654. /* l %r5,<d(cpu_nr)> */
  655. EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
  656. #else
  657. /* lhi %r5,0 */
  658. EMIT4(0xa7580000);
  659. #endif
  660. break;
  661. default: /* too complex, give up */
  662. goto out;
  663. }
  664. addrs[i] = jit->prg - jit->start;
  665. return 0;
  666. out:
  667. return -1;
  668. }
  669. void bpf_jit_compile(struct sk_filter *fp)
  670. {
  671. unsigned long size, prg_len, lit_len;
  672. struct bpf_jit jit, cjit;
  673. unsigned int *addrs;
  674. int pass, i;
  675. if (!bpf_jit_enable)
  676. return;
  677. addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
  678. if (addrs == NULL)
  679. return;
  680. memset(addrs, 0, fp->len * sizeof(*addrs));
  681. memset(&jit, 0, sizeof(cjit));
  682. memset(&cjit, 0, sizeof(cjit));
  683. for (pass = 0; pass < 10; pass++) {
  684. jit.prg = jit.start;
  685. jit.lit = jit.mid;
  686. bpf_jit_prologue(&jit);
  687. bpf_jit_noleaks(&jit, fp->insns);
  688. for (i = 0; i < fp->len; i++) {
  689. if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
  690. i == fp->len - 1))
  691. goto out;
  692. }
  693. bpf_jit_epilogue(&jit);
  694. if (jit.start) {
  695. WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
  696. if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
  697. break;
  698. } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
  699. prg_len = jit.prg - jit.start;
  700. lit_len = jit.lit - jit.mid;
  701. size = max_t(unsigned long, prg_len + lit_len,
  702. sizeof(struct work_struct));
  703. if (size >= BPF_SIZE_MAX)
  704. goto out;
  705. jit.start = module_alloc(size);
  706. if (!jit.start)
  707. goto out;
  708. jit.prg = jit.mid = jit.start + prg_len;
  709. jit.lit = jit.end = jit.start + prg_len + lit_len;
  710. jit.base_ip += (unsigned long) jit.start;
  711. jit.exit_ip += (unsigned long) jit.start;
  712. jit.ret0_ip += (unsigned long) jit.start;
  713. }
  714. cjit = jit;
  715. }
  716. if (bpf_jit_enable > 1) {
  717. pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
  718. fp->len, jit.end - jit.start, pass, jit.start);
  719. if (jit.start) {
  720. printk(KERN_ERR "JIT code:\n");
  721. print_fn_code(jit.start, jit.mid - jit.start);
  722. print_hex_dump(KERN_ERR, "JIT literals:\n",
  723. DUMP_PREFIX_ADDRESS, 16, 1,
  724. jit.mid, jit.end - jit.mid, false);
  725. }
  726. }
  727. if (jit.start)
  728. fp->bpf_func = (void *) jit.start;
  729. out:
  730. kfree(addrs);
  731. }
  732. static void jit_free_defer(struct work_struct *arg)
  733. {
  734. module_free(NULL, arg);
  735. }
  736. /* run from softirq, we must use a work_struct to call
  737. * module_free() from process context
  738. */
  739. void bpf_jit_free(struct sk_filter *fp)
  740. {
  741. struct work_struct *work;
  742. if (fp->bpf_func == sk_run_filter)
  743. return;
  744. work = (struct work_struct *)fp->bpf_func;
  745. INIT_WORK(work, jit_free_defer);
  746. schedule_work(work);
  747. }