bpf_jit_comp.c 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887
  1. /*
  2. * BPF Jit compiler for s390.
  3. *
  4. * Copyright IBM Corp. 2012
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/moduleloader.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/filter.h>
  12. #include <linux/random.h>
  13. #include <linux/init.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/processor.h>
  16. #include <asm/facility.h>
  17. /*
  18. * Conventions:
  19. * %r2 = skb pointer
  20. * %r3 = offset parameter
  21. * %r4 = scratch register / length parameter
  22. * %r5 = BPF A accumulator
  23. * %r8 = return address
  24. * %r9 = save register for skb pointer
  25. * %r10 = skb->data
  26. * %r11 = skb->len - skb->data_len (headlen)
  27. * %r12 = BPF X accumulator
  28. * %r13 = literal pool pointer
  29. * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
  30. */
  31. int bpf_jit_enable __read_mostly;
  32. /*
  33. * assembly code in arch/x86/net/bpf_jit.S
  34. */
  35. extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
  36. extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
  37. struct bpf_jit {
  38. unsigned int seen;
  39. u8 *start;
  40. u8 *prg;
  41. u8 *mid;
  42. u8 *lit;
  43. u8 *end;
  44. u8 *base_ip;
  45. u8 *ret0_ip;
  46. u8 *exit_ip;
  47. unsigned int off_load_word;
  48. unsigned int off_load_half;
  49. unsigned int off_load_byte;
  50. unsigned int off_load_bmsh;
  51. unsigned int off_load_iword;
  52. unsigned int off_load_ihalf;
  53. unsigned int off_load_ibyte;
  54. };
  55. #define BPF_SIZE_MAX 4096 /* Max size for program */
  56. #define SEEN_DATAREF 1 /* might call external helpers */
  57. #define SEEN_XREG 2 /* ebx is used */
  58. #define SEEN_MEM 4 /* use mem[] for temporary storage */
  59. #define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
  60. #define SEEN_LITERAL 16 /* code uses literals */
  61. #define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
  62. #define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
  63. #define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
  64. #define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
  65. #define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
  66. #define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
  67. #define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
  68. #define EMIT2(op) \
  69. ({ \
  70. if (jit->prg + 2 <= jit->mid) \
  71. *(u16 *) jit->prg = op; \
  72. jit->prg += 2; \
  73. })
  74. #define EMIT4(op) \
  75. ({ \
  76. if (jit->prg + 4 <= jit->mid) \
  77. *(u32 *) jit->prg = op; \
  78. jit->prg += 4; \
  79. })
  80. #define EMIT4_DISP(op, disp) \
  81. ({ \
  82. unsigned int __disp = (disp) & 0xfff; \
  83. EMIT4(op | __disp); \
  84. })
  85. #define EMIT4_IMM(op, imm) \
  86. ({ \
  87. unsigned int __imm = (imm) & 0xffff; \
  88. EMIT4(op | __imm); \
  89. })
  90. #define EMIT4_PCREL(op, pcrel) \
  91. ({ \
  92. long __pcrel = ((pcrel) >> 1) & 0xffff; \
  93. EMIT4(op | __pcrel); \
  94. })
  95. #define EMIT6(op1, op2) \
  96. ({ \
  97. if (jit->prg + 6 <= jit->mid) { \
  98. *(u32 *) jit->prg = op1; \
  99. *(u16 *) (jit->prg + 4) = op2; \
  100. } \
  101. jit->prg += 6; \
  102. })
  103. #define EMIT6_DISP(op1, op2, disp) \
  104. ({ \
  105. unsigned int __disp = (disp) & 0xfff; \
  106. EMIT6(op1 | __disp, op2); \
  107. })
  108. #define EMIT6_IMM(op, imm) \
  109. ({ \
  110. unsigned int __imm = (imm); \
  111. EMIT6(op | (__imm >> 16), __imm & 0xffff); \
  112. })
  113. #define EMIT_CONST(val) \
  114. ({ \
  115. unsigned int ret; \
  116. ret = (unsigned int) (jit->lit - jit->base_ip); \
  117. jit->seen |= SEEN_LITERAL; \
  118. if (jit->lit + 4 <= jit->end) \
  119. *(u32 *) jit->lit = val; \
  120. jit->lit += 4; \
  121. ret; \
  122. })
  123. #define EMIT_FN_CONST(bit, fn) \
  124. ({ \
  125. unsigned int ret; \
  126. ret = (unsigned int) (jit->lit - jit->base_ip); \
  127. if (jit->seen & bit) { \
  128. jit->seen |= SEEN_LITERAL; \
  129. if (jit->lit + 8 <= jit->end) \
  130. *(void **) jit->lit = fn; \
  131. jit->lit += 8; \
  132. } \
  133. ret; \
  134. })
  135. static void bpf_jit_prologue(struct bpf_jit *jit)
  136. {
  137. /* Save registers and create stack frame if necessary */
  138. if (jit->seen & SEEN_DATAREF) {
  139. /* stmg %r8,%r15,88(%r15) */
  140. EMIT6(0xeb8ff058, 0x0024);
  141. /* lgr %r14,%r15 */
  142. EMIT4(0xb90400ef);
  143. /* ahi %r15,<offset> */
  144. EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
  145. /* stg %r14,152(%r15) */
  146. EMIT6(0xe3e0f098, 0x0024);
  147. } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  148. /* stmg %r12,%r13,120(%r15) */
  149. EMIT6(0xebcdf078, 0x0024);
  150. else if (jit->seen & SEEN_XREG)
  151. /* stg %r12,120(%r15) */
  152. EMIT6(0xe3c0f078, 0x0024);
  153. else if (jit->seen & SEEN_LITERAL)
  154. /* stg %r13,128(%r15) */
  155. EMIT6(0xe3d0f080, 0x0024);
  156. /* Setup literal pool */
  157. if (jit->seen & SEEN_LITERAL) {
  158. /* basr %r13,0 */
  159. EMIT2(0x0dd0);
  160. jit->base_ip = jit->prg;
  161. }
  162. jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
  163. jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
  164. jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
  165. jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
  166. jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
  167. jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
  168. jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
  169. /* Filter needs to access skb data */
  170. if (jit->seen & SEEN_DATAREF) {
  171. /* l %r11,<len>(%r2) */
  172. EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
  173. /* s %r11,<data_len>(%r2) */
  174. EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
  175. /* lg %r10,<data>(%r2) */
  176. EMIT6_DISP(0xe3a02000, 0x0004,
  177. offsetof(struct sk_buff, data));
  178. }
  179. }
  180. static void bpf_jit_epilogue(struct bpf_jit *jit)
  181. {
  182. /* Return 0 */
  183. if (jit->seen & SEEN_RET0) {
  184. jit->ret0_ip = jit->prg;
  185. /* lghi %r2,0 */
  186. EMIT4(0xa7290000);
  187. }
  188. jit->exit_ip = jit->prg;
  189. /* Restore registers */
  190. if (jit->seen & SEEN_DATAREF)
  191. /* lmg %r8,%r15,<offset>(%r15) */
  192. EMIT6_DISP(0xeb8ff000, 0x0004,
  193. (jit->seen & SEEN_MEM) ? 200 : 168);
  194. else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  195. /* lmg %r12,%r13,120(%r15) */
  196. EMIT6(0xebcdf078, 0x0004);
  197. else if (jit->seen & SEEN_XREG)
  198. /* lg %r12,120(%r15) */
  199. EMIT6(0xe3c0f078, 0x0004);
  200. else if (jit->seen & SEEN_LITERAL)
  201. /* lg %r13,128(%r15) */
  202. EMIT6(0xe3d0f080, 0x0004);
  203. /* br %r14 */
  204. EMIT2(0x07fe);
  205. }
  206. /* Helper to find the offset of pkt_type in sk_buff
  207. * Make sure its still a 3bit field starting at the MSBs within a byte.
  208. */
  209. #define PKT_TYPE_MAX 0xe0
  210. static int pkt_type_offset;
  211. static int __init bpf_pkt_type_offset_init(void)
  212. {
  213. struct sk_buff skb_probe = {
  214. .pkt_type = ~0,
  215. };
  216. char *ct = (char *)&skb_probe;
  217. int off;
  218. pkt_type_offset = -1;
  219. for (off = 0; off < sizeof(struct sk_buff); off++) {
  220. if (!ct[off])
  221. continue;
  222. if (ct[off] == PKT_TYPE_MAX)
  223. pkt_type_offset = off;
  224. else {
  225. /* Found non matching bit pattern, fix needed. */
  226. WARN_ON_ONCE(1);
  227. pkt_type_offset = -1;
  228. return -1;
  229. }
  230. }
  231. return 0;
  232. }
  233. device_initcall(bpf_pkt_type_offset_init);
  234. /*
  235. * make sure we dont leak kernel information to user
  236. */
  237. static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
  238. {
  239. /* Clear temporary memory if (seen & SEEN_MEM) */
  240. if (jit->seen & SEEN_MEM)
  241. /* xc 0(64,%r15),0(%r15) */
  242. EMIT6(0xd73ff000, 0xf000);
  243. /* Clear X if (seen & SEEN_XREG) */
  244. if (jit->seen & SEEN_XREG)
  245. /* lhi %r12,0 */
  246. EMIT4(0xa7c80000);
  247. /* Clear A if the first register does not set it. */
  248. switch (filter[0].code) {
  249. case BPF_S_LD_W_ABS:
  250. case BPF_S_LD_H_ABS:
  251. case BPF_S_LD_B_ABS:
  252. case BPF_S_LD_W_LEN:
  253. case BPF_S_LD_W_IND:
  254. case BPF_S_LD_H_IND:
  255. case BPF_S_LD_B_IND:
  256. case BPF_S_LDX_B_MSH:
  257. case BPF_S_LD_IMM:
  258. case BPF_S_LD_MEM:
  259. case BPF_S_MISC_TXA:
  260. case BPF_S_ANC_PROTOCOL:
  261. case BPF_S_ANC_PKTTYPE:
  262. case BPF_S_ANC_IFINDEX:
  263. case BPF_S_ANC_MARK:
  264. case BPF_S_ANC_QUEUE:
  265. case BPF_S_ANC_HATYPE:
  266. case BPF_S_ANC_RXHASH:
  267. case BPF_S_ANC_CPU:
  268. case BPF_S_ANC_VLAN_TAG:
  269. case BPF_S_ANC_VLAN_TAG_PRESENT:
  270. case BPF_S_RET_K:
  271. /* first instruction sets A register */
  272. break;
  273. default: /* A = 0 */
  274. /* lhi %r5,0 */
  275. EMIT4(0xa7580000);
  276. }
  277. }
  278. static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
  279. unsigned int *addrs, int i, int last)
  280. {
  281. unsigned int K;
  282. int offset;
  283. unsigned int mask;
  284. K = filter->k;
  285. switch (filter->code) {
  286. case BPF_S_ALU_ADD_X: /* A += X */
  287. jit->seen |= SEEN_XREG;
  288. /* ar %r5,%r12 */
  289. EMIT2(0x1a5c);
  290. break;
  291. case BPF_S_ALU_ADD_K: /* A += K */
  292. if (!K)
  293. break;
  294. if (K <= 16383)
  295. /* ahi %r5,<K> */
  296. EMIT4_IMM(0xa75a0000, K);
  297. else if (test_facility(21))
  298. /* alfi %r5,<K> */
  299. EMIT6_IMM(0xc25b0000, K);
  300. else
  301. /* a %r5,<d(K)>(%r13) */
  302. EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
  303. break;
  304. case BPF_S_ALU_SUB_X: /* A -= X */
  305. jit->seen |= SEEN_XREG;
  306. /* sr %r5,%r12 */
  307. EMIT2(0x1b5c);
  308. break;
  309. case BPF_S_ALU_SUB_K: /* A -= K */
  310. if (!K)
  311. break;
  312. if (K <= 16384)
  313. /* ahi %r5,-K */
  314. EMIT4_IMM(0xa75a0000, -K);
  315. else if (test_facility(21))
  316. /* alfi %r5,-K */
  317. EMIT6_IMM(0xc25b0000, -K);
  318. else
  319. /* s %r5,<d(K)>(%r13) */
  320. EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
  321. break;
  322. case BPF_S_ALU_MUL_X: /* A *= X */
  323. jit->seen |= SEEN_XREG;
  324. /* msr %r5,%r12 */
  325. EMIT4(0xb252005c);
  326. break;
  327. case BPF_S_ALU_MUL_K: /* A *= K */
  328. if (K <= 16383)
  329. /* mhi %r5,K */
  330. EMIT4_IMM(0xa75c0000, K);
  331. else if (test_facility(34))
  332. /* msfi %r5,<K> */
  333. EMIT6_IMM(0xc2510000, K);
  334. else
  335. /* ms %r5,<d(K)>(%r13) */
  336. EMIT4_DISP(0x7150d000, EMIT_CONST(K));
  337. break;
  338. case BPF_S_ALU_DIV_X: /* A /= X */
  339. jit->seen |= SEEN_XREG | SEEN_RET0;
  340. /* ltr %r12,%r12 */
  341. EMIT2(0x12cc);
  342. /* jz <ret0> */
  343. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  344. /* lhi %r4,0 */
  345. EMIT4(0xa7480000);
  346. /* dr %r4,%r12 */
  347. EMIT2(0x1d4c);
  348. break;
  349. case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
  350. /* m %r4,<d(K)>(%r13) */
  351. EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
  352. /* lr %r5,%r4 */
  353. EMIT2(0x1854);
  354. break;
  355. case BPF_S_ALU_MOD_X: /* A %= X */
  356. jit->seen |= SEEN_XREG | SEEN_RET0;
  357. /* ltr %r12,%r12 */
  358. EMIT2(0x12cc);
  359. /* jz <ret0> */
  360. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  361. /* lhi %r4,0 */
  362. EMIT4(0xa7480000);
  363. /* dr %r4,%r12 */
  364. EMIT2(0x1d4c);
  365. /* lr %r5,%r4 */
  366. EMIT2(0x1854);
  367. break;
  368. case BPF_S_ALU_MOD_K: /* A %= K */
  369. /* lhi %r4,0 */
  370. EMIT4(0xa7480000);
  371. /* d %r4,<d(K)>(%r13) */
  372. EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
  373. /* lr %r5,%r4 */
  374. EMIT2(0x1854);
  375. break;
  376. case BPF_S_ALU_AND_X: /* A &= X */
  377. jit->seen |= SEEN_XREG;
  378. /* nr %r5,%r12 */
  379. EMIT2(0x145c);
  380. break;
  381. case BPF_S_ALU_AND_K: /* A &= K */
  382. if (test_facility(21))
  383. /* nilf %r5,<K> */
  384. EMIT6_IMM(0xc05b0000, K);
  385. else
  386. /* n %r5,<d(K)>(%r13) */
  387. EMIT4_DISP(0x5450d000, EMIT_CONST(K));
  388. break;
  389. case BPF_S_ALU_OR_X: /* A |= X */
  390. jit->seen |= SEEN_XREG;
  391. /* or %r5,%r12 */
  392. EMIT2(0x165c);
  393. break;
  394. case BPF_S_ALU_OR_K: /* A |= K */
  395. if (test_facility(21))
  396. /* oilf %r5,<K> */
  397. EMIT6_IMM(0xc05d0000, K);
  398. else
  399. /* o %r5,<d(K)>(%r13) */
  400. EMIT4_DISP(0x5650d000, EMIT_CONST(K));
  401. break;
  402. case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
  403. case BPF_S_ALU_XOR_X:
  404. jit->seen |= SEEN_XREG;
  405. /* xr %r5,%r12 */
  406. EMIT2(0x175c);
  407. break;
  408. case BPF_S_ALU_XOR_K: /* A ^= K */
  409. if (!K)
  410. break;
  411. /* x %r5,<d(K)>(%r13) */
  412. EMIT4_DISP(0x5750d000, EMIT_CONST(K));
  413. break;
  414. case BPF_S_ALU_LSH_X: /* A <<= X; */
  415. jit->seen |= SEEN_XREG;
  416. /* sll %r5,0(%r12) */
  417. EMIT4(0x8950c000);
  418. break;
  419. case BPF_S_ALU_LSH_K: /* A <<= K */
  420. if (K == 0)
  421. break;
  422. /* sll %r5,K */
  423. EMIT4_DISP(0x89500000, K);
  424. break;
  425. case BPF_S_ALU_RSH_X: /* A >>= X; */
  426. jit->seen |= SEEN_XREG;
  427. /* srl %r5,0(%r12) */
  428. EMIT4(0x8850c000);
  429. break;
  430. case BPF_S_ALU_RSH_K: /* A >>= K; */
  431. if (K == 0)
  432. break;
  433. /* srl %r5,K */
  434. EMIT4_DISP(0x88500000, K);
  435. break;
  436. case BPF_S_ALU_NEG: /* A = -A */
  437. /* lnr %r5,%r5 */
  438. EMIT2(0x1155);
  439. break;
  440. case BPF_S_JMP_JA: /* ip += K */
  441. offset = addrs[i + K] + jit->start - jit->prg;
  442. EMIT4_PCREL(0xa7f40000, offset);
  443. break;
  444. case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
  445. mask = 0x200000; /* jh */
  446. goto kbranch;
  447. case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
  448. mask = 0xa00000; /* jhe */
  449. goto kbranch;
  450. case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
  451. mask = 0x800000; /* je */
  452. kbranch: /* Emit compare if the branch targets are different */
  453. if (filter->jt != filter->jf) {
  454. if (K <= 16383)
  455. /* chi %r5,<K> */
  456. EMIT4_IMM(0xa75e0000, K);
  457. else if (test_facility(21))
  458. /* clfi %r5,<K> */
  459. EMIT6_IMM(0xc25f0000, K);
  460. else
  461. /* c %r5,<d(K)>(%r13) */
  462. EMIT4_DISP(0x5950d000, EMIT_CONST(K));
  463. }
  464. branch: if (filter->jt == filter->jf) {
  465. if (filter->jt == 0)
  466. break;
  467. /* j <jt> */
  468. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  469. EMIT4_PCREL(0xa7f40000, offset);
  470. break;
  471. }
  472. if (filter->jt != 0) {
  473. /* brc <mask>,<jt> */
  474. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  475. EMIT4_PCREL(0xa7040000 | mask, offset);
  476. }
  477. if (filter->jf != 0) {
  478. /* brc <mask^15>,<jf> */
  479. offset = addrs[i + filter->jf] + jit->start - jit->prg;
  480. EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
  481. }
  482. break;
  483. case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
  484. mask = 0x700000; /* jnz */
  485. /* Emit test if the branch targets are different */
  486. if (filter->jt != filter->jf) {
  487. if (K > 65535) {
  488. /* lr %r4,%r5 */
  489. EMIT2(0x1845);
  490. /* n %r4,<d(K)>(%r13) */
  491. EMIT4_DISP(0x5440d000, EMIT_CONST(K));
  492. } else
  493. /* tmll %r5,K */
  494. EMIT4_IMM(0xa7510000, K);
  495. }
  496. goto branch;
  497. case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
  498. mask = 0x200000; /* jh */
  499. goto xbranch;
  500. case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
  501. mask = 0xa00000; /* jhe */
  502. goto xbranch;
  503. case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
  504. mask = 0x800000; /* je */
  505. xbranch: /* Emit compare if the branch targets are different */
  506. if (filter->jt != filter->jf) {
  507. jit->seen |= SEEN_XREG;
  508. /* cr %r5,%r12 */
  509. EMIT2(0x195c);
  510. }
  511. goto branch;
  512. case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
  513. mask = 0x700000; /* jnz */
  514. /* Emit test if the branch targets are different */
  515. if (filter->jt != filter->jf) {
  516. jit->seen |= SEEN_XREG;
  517. /* lr %r4,%r5 */
  518. EMIT2(0x1845);
  519. /* nr %r4,%r12 */
  520. EMIT2(0x144c);
  521. }
  522. goto branch;
  523. case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
  524. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
  525. offset = jit->off_load_word;
  526. goto load_abs;
  527. case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
  528. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
  529. offset = jit->off_load_half;
  530. goto load_abs;
  531. case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
  532. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
  533. offset = jit->off_load_byte;
  534. load_abs: if ((int) K < 0)
  535. goto out;
  536. call_fn: /* lg %r1,<d(function)>(%r13) */
  537. EMIT6_DISP(0xe310d000, 0x0004, offset);
  538. /* l %r3,<d(K)>(%r13) */
  539. EMIT4_DISP(0x5830d000, EMIT_CONST(K));
  540. /* basr %r8,%r1 */
  541. EMIT2(0x0d81);
  542. /* jnz <ret0> */
  543. EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
  544. break;
  545. case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
  546. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
  547. offset = jit->off_load_iword;
  548. goto call_fn;
  549. case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
  550. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
  551. offset = jit->off_load_ihalf;
  552. goto call_fn;
  553. case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
  554. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
  555. offset = jit->off_load_ibyte;
  556. goto call_fn;
  557. case BPF_S_LDX_B_MSH:
  558. /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
  559. jit->seen |= SEEN_RET0;
  560. if ((int) K < 0) {
  561. /* j <ret0> */
  562. EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
  563. break;
  564. }
  565. jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
  566. offset = jit->off_load_bmsh;
  567. goto call_fn;
  568. case BPF_S_LD_W_LEN: /* A = skb->len; */
  569. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
  570. /* l %r5,<d(len)>(%r2) */
  571. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
  572. break;
  573. case BPF_S_LDX_W_LEN: /* X = skb->len; */
  574. jit->seen |= SEEN_XREG;
  575. /* l %r12,<d(len)>(%r2) */
  576. EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
  577. break;
  578. case BPF_S_LD_IMM: /* A = K */
  579. if (K <= 16383)
  580. /* lhi %r5,K */
  581. EMIT4_IMM(0xa7580000, K);
  582. else if (test_facility(21))
  583. /* llilf %r5,<K> */
  584. EMIT6_IMM(0xc05f0000, K);
  585. else
  586. /* l %r5,<d(K)>(%r13) */
  587. EMIT4_DISP(0x5850d000, EMIT_CONST(K));
  588. break;
  589. case BPF_S_LDX_IMM: /* X = K */
  590. jit->seen |= SEEN_XREG;
  591. if (K <= 16383)
  592. /* lhi %r12,<K> */
  593. EMIT4_IMM(0xa7c80000, K);
  594. else if (test_facility(21))
  595. /* llilf %r12,<K> */
  596. EMIT6_IMM(0xc0cf0000, K);
  597. else
  598. /* l %r12,<d(K)>(%r13) */
  599. EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
  600. break;
  601. case BPF_S_LD_MEM: /* A = mem[K] */
  602. jit->seen |= SEEN_MEM;
  603. /* l %r5,<K>(%r15) */
  604. EMIT4_DISP(0x5850f000,
  605. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  606. break;
  607. case BPF_S_LDX_MEM: /* X = mem[K] */
  608. jit->seen |= SEEN_XREG | SEEN_MEM;
  609. /* l %r12,<K>(%r15) */
  610. EMIT4_DISP(0x58c0f000,
  611. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  612. break;
  613. case BPF_S_MISC_TAX: /* X = A */
  614. jit->seen |= SEEN_XREG;
  615. /* lr %r12,%r5 */
  616. EMIT2(0x18c5);
  617. break;
  618. case BPF_S_MISC_TXA: /* A = X */
  619. jit->seen |= SEEN_XREG;
  620. /* lr %r5,%r12 */
  621. EMIT2(0x185c);
  622. break;
  623. case BPF_S_RET_K:
  624. if (K == 0) {
  625. jit->seen |= SEEN_RET0;
  626. if (last)
  627. break;
  628. /* j <ret0> */
  629. EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
  630. } else {
  631. if (K <= 16383)
  632. /* lghi %r2,K */
  633. EMIT4_IMM(0xa7290000, K);
  634. else
  635. /* llgf %r2,<K>(%r13) */
  636. EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
  637. /* j <exit> */
  638. if (last && !(jit->seen & SEEN_RET0))
  639. break;
  640. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  641. }
  642. break;
  643. case BPF_S_RET_A:
  644. /* llgfr %r2,%r5 */
  645. EMIT4(0xb9160025);
  646. /* j <exit> */
  647. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  648. break;
  649. case BPF_S_ST: /* mem[K] = A */
  650. jit->seen |= SEEN_MEM;
  651. /* st %r5,<K>(%r15) */
  652. EMIT4_DISP(0x5050f000,
  653. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  654. break;
  655. case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
  656. jit->seen |= SEEN_XREG | SEEN_MEM;
  657. /* st %r12,<K>(%r15) */
  658. EMIT4_DISP(0x50c0f000,
  659. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  660. break;
  661. case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
  662. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
  663. /* lhi %r5,0 */
  664. EMIT4(0xa7580000);
  665. /* icm %r5,3,<d(protocol)>(%r2) */
  666. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
  667. break;
  668. case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
  669. * A = skb->dev->ifindex */
  670. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
  671. jit->seen |= SEEN_RET0;
  672. /* lg %r1,<d(dev)>(%r2) */
  673. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  674. /* ltgr %r1,%r1 */
  675. EMIT4(0xb9020011);
  676. /* jz <ret0> */
  677. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  678. /* l %r5,<d(ifindex)>(%r1) */
  679. EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
  680. break;
  681. case BPF_S_ANC_MARK: /* A = skb->mark */
  682. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
  683. /* l %r5,<d(mark)>(%r2) */
  684. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
  685. break;
  686. case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
  687. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
  688. /* lhi %r5,0 */
  689. EMIT4(0xa7580000);
  690. /* icm %r5,3,<d(queue_mapping)>(%r2) */
  691. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
  692. break;
  693. case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
  694. * A = skb->dev->type */
  695. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
  696. jit->seen |= SEEN_RET0;
  697. /* lg %r1,<d(dev)>(%r2) */
  698. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  699. /* ltgr %r1,%r1 */
  700. EMIT4(0xb9020011);
  701. /* jz <ret0> */
  702. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  703. /* lhi %r5,0 */
  704. EMIT4(0xa7580000);
  705. /* icm %r5,3,<d(type)>(%r1) */
  706. EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
  707. break;
  708. case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
  709. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
  710. /* l %r5,<d(rxhash)>(%r2) */
  711. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
  712. break;
  713. case BPF_S_ANC_VLAN_TAG:
  714. case BPF_S_ANC_VLAN_TAG_PRESENT:
  715. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
  716. BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
  717. /* lhi %r5,0 */
  718. EMIT4(0xa7580000);
  719. /* icm %r5,3,<d(vlan_tci)>(%r2) */
  720. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
  721. if (filter->code == BPF_S_ANC_VLAN_TAG) {
  722. /* nill %r5,0xefff */
  723. EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
  724. } else {
  725. /* nill %r5,0x1000 */
  726. EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
  727. /* srl %r5,12 */
  728. EMIT4_DISP(0x88500000, 12);
  729. }
  730. break;
  731. case BPF_S_ANC_PKTTYPE:
  732. if (pkt_type_offset < 0)
  733. goto out;
  734. /* lhi %r5,0 */
  735. EMIT4(0xa7580000);
  736. /* ic %r5,<d(pkt_type_offset)>(%r2) */
  737. EMIT4_DISP(0x43502000, pkt_type_offset);
  738. /* srl %r5,5 */
  739. EMIT4_DISP(0x88500000, 5);
  740. break;
  741. case BPF_S_ANC_CPU: /* A = smp_processor_id() */
  742. #ifdef CONFIG_SMP
  743. /* l %r5,<d(cpu_nr)> */
  744. EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
  745. #else
  746. /* lhi %r5,0 */
  747. EMIT4(0xa7580000);
  748. #endif
  749. break;
  750. default: /* too complex, give up */
  751. goto out;
  752. }
  753. addrs[i] = jit->prg - jit->start;
  754. return 0;
  755. out:
  756. return -1;
  757. }
  758. /*
  759. * Note: for security reasons, bpf code will follow a randomly
  760. * sized amount of illegal instructions.
  761. */
  762. struct bpf_binary_header {
  763. unsigned int pages;
  764. u8 image[];
  765. };
  766. static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
  767. u8 **image_ptr)
  768. {
  769. struct bpf_binary_header *header;
  770. unsigned int sz, hole;
  771. /* Most BPF filters are really small, but if some of them fill a page,
  772. * allow at least 128 extra bytes for illegal instructions.
  773. */
  774. sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
  775. header = module_alloc(sz);
  776. if (!header)
  777. return NULL;
  778. memset(header, 0, sz);
  779. header->pages = sz / PAGE_SIZE;
  780. hole = sz - bpfsize + sizeof(*header);
  781. /* Insert random number of illegal instructions before BPF code
  782. * and make sure the first instruction starts at an even address.
  783. */
  784. *image_ptr = &header->image[(prandom_u32() % hole) & -2];
  785. return header;
  786. }
  787. void bpf_jit_compile(struct sk_filter *fp)
  788. {
  789. struct bpf_binary_header *header = NULL;
  790. unsigned long size, prg_len, lit_len;
  791. struct bpf_jit jit, cjit;
  792. unsigned int *addrs;
  793. int pass, i;
  794. if (!bpf_jit_enable)
  795. return;
  796. addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
  797. if (addrs == NULL)
  798. return;
  799. memset(&jit, 0, sizeof(cjit));
  800. memset(&cjit, 0, sizeof(cjit));
  801. for (pass = 0; pass < 10; pass++) {
  802. jit.prg = jit.start;
  803. jit.lit = jit.mid;
  804. bpf_jit_prologue(&jit);
  805. bpf_jit_noleaks(&jit, fp->insns);
  806. for (i = 0; i < fp->len; i++) {
  807. if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
  808. i == fp->len - 1))
  809. goto out;
  810. }
  811. bpf_jit_epilogue(&jit);
  812. if (jit.start) {
  813. WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
  814. if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
  815. break;
  816. } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
  817. prg_len = jit.prg - jit.start;
  818. lit_len = jit.lit - jit.mid;
  819. size = prg_len + lit_len;
  820. if (size >= BPF_SIZE_MAX)
  821. goto out;
  822. header = bpf_alloc_binary(size, &jit.start);
  823. if (!header)
  824. goto out;
  825. jit.prg = jit.mid = jit.start + prg_len;
  826. jit.lit = jit.end = jit.start + prg_len + lit_len;
  827. jit.base_ip += (unsigned long) jit.start;
  828. jit.exit_ip += (unsigned long) jit.start;
  829. jit.ret0_ip += (unsigned long) jit.start;
  830. }
  831. cjit = jit;
  832. }
  833. if (bpf_jit_enable > 1) {
  834. bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
  835. if (jit.start)
  836. print_fn_code(jit.start, jit.mid - jit.start);
  837. }
  838. if (jit.start) {
  839. set_memory_ro((unsigned long)header, header->pages);
  840. fp->bpf_func = (void *) jit.start;
  841. }
  842. out:
  843. kfree(addrs);
  844. }
  845. void bpf_jit_free(struct sk_filter *fp)
  846. {
  847. unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
  848. struct bpf_binary_header *header = (void *)addr;
  849. if (fp->bpf_func == sk_run_filter)
  850. return;
  851. set_memory_rw(addr, header->pages);
  852. module_free(NULL, header);
  853. }