/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
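/*
 * The offsets above match the kernel's struct cast5_ctx: km is an
 * array of 16 32-bit masking subkeys, kr holds the 16 rotation
 * subkeys as one byte each (5 significant bits), and rr is non-zero
 * for keys of 80 bits or less, for which RFC 2144 prescribes 12
 * rounds instead of 16.
 */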
/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
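/*
 * lookup_32bit folds the four s-box entries selected by the bytes of
 * one 32-bit word of src into dst, combining them with op1/op2/op3.
 * src is consumed 16 bits at a time so the bl/bh byte registers can
 * be reused; interleave_op lets the caller slip in the shift that
 * exposes the next word.  Note that the high-byte registers (%ah,
 * %bh, %ch, %dh) cannot be encoded in an instruction carrying a REX
 * prefix, which is why the index registers RID1/RID2 are the legacy
 * %rbp/%rsi.
 */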
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);                   \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;
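/*
 * F_head computes the key-mixing half of the f function for four
 * 32-bit words at once: x = rotl32(a op0 KM, KR), with the rotate
 * built from vpslld/vpsrld/vpor (RKRF holds the rotation count, RKRR
 * holds 32 - count).  The two 64-bit halves of the result are then
 * moved to general-purpose registers for the s-box lookups.
 */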
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;        \
	vpslld	RKRF,	x,    RTMP;     \
	vpsrld	RKRR,	x,    x;        \
	vpor	RTMP,	x,    x;        \
	\
	vmovq		x,    gi1;      \
	vpextrq $1,	x,    gi2;
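/*
 * F_tail runs the s-box phase: each rotated word is fed through
 * lookup_32bit, and the four 32-bit results are reassembled into one
 * xmm register with shlq/orq and vmovq/vpinsrq.
 */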
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);             \
	F_head(b2, RX, RGI3, RGI4, op0);             \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);   \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor	a1, RX,   a1;                        \
	vpxor	a2, RTMP, a2;
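/*
 * CAST5's three round function types (RFC 2144) share one skeleton:
 *
 *	I = rotl32(km op0 D, kr)
 *	f = ((s1[Ia] op1 s2[Ib]) op2 s3[Ic]) op3 s4[Id]
 *
 * with (op0..op3) = (+, ^, -, +) for type 1, (^, -, +, ^) for type 2
 * and (-, +, ^, -) for type 3, which is why all three reduce to F_2
 * with different operators.
 */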
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);
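/*
 * One round over all 16 blocks: broadcast the 32-bit masking subkey
 * Km[n], extract the 5 significant bits of the next rotation byte
 * from RKR into RKRF (RKRR = 32 - RKRF feeds the right-shift half of
 * the rotate), then shift RKR down one byte so the following round
 * sees its own rotation amount.
 */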
#define round(l, r, n, f) \
	vbroadcastss	(km+(4*n))(CTX),	RKM;         \
	vpand		R1ST,			RKR,	RKRF; \
	vpsubq		RKRF,			R32,	RKRR; \
	vpsrldq $1,	RKR,			RKR;         \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f);         \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

#define enc_preload_rkr() \
	vbroadcastss	.L16_mask,		RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),		RKR, RKR;
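/*
 * The xor with .L16_mask in the preload macros adds 16 (mod 32, as
 * each kr byte holds only 5 significant bits) to every rotation
 * amount.  Each rotated word thus leaves F_head halfword-swapped,
 * which is the layout lookup_32bit expects: the two most significant
 * index bytes arrive in the low 16 bits and are read first.
 *
 * Decryption runs the rounds 15..0, so dec_preload_rkr below
 * additionally reverses the 16 rotation bytes with the bswap128
 * shuffle so they are consumed in reverse order.
 */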
#define dec_preload_rkr() \
	vbroadcastss	.L16_mask,		RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),		RKR, RKR; \
	vpshufb		.Lbswap128_mask,	RKR, RKR;
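/*
 * transpose_2x4 regroups two registers that each hold two 64-bit
 * blocks as (l, r) dword pairs into one register of four l words and
 * one of four r words, so a single round macro can process the left
 * and right halves of four blocks in parallel.
 */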
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t1; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask,	x0, x0; \
	vpshufb rmask,	x1, x1;
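/*
 * Constants: .Lbswap_mask byte-swaps each 32-bit word between the
 * big-endian block format and host order, .Lbswap128_mask reverses a
 * whole 16-byte vector, and .Lbswap_iv_mask byte-swaps the 64-bit IV
 * and duplicates it into both halves.  .L16_mask biases the rotation
 * amounts (see the preload macros), .L32_mask provides the "32" for
 * computing 32 - r, and .Lfirst_mask isolates the 5 significant bits
 * of a rotation byte.
 */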
.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
.type	__cast5_enc_blk16,@function;

__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);
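
	/* RFC 2144: keys of 80 bits or less use only 12 rounds; the key
	 * schedule signals this through the rr flag. */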
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

__skip_enc:
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

.align 16
.type	__cast5_dec_blk16,@function;

__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
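
	/* 12-round keys skip rounds 15..12 and enter at __dec_tail with
	 * the rotation bytes adjusted (see __skip_dec below). */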
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

__skip_dec:
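	/* dec_preload_rkr reversed the rotation bytes for all 16
	 * rounds; drop the four belonging to the skipped rounds
	 * 15..12. */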
	vpsrldq $4, RKR, RKR;
	jmp __dec_tail;

.align 16
.global cast5_ecb_enc_16way
.type	cast5_ecb_enc_16way,@function;

cast5_ecb_enc_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;
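
	/* The final Feistel swap is folded into outunpack_blocks, which
	 * was handed the RR registers first; RR1 now holds output
	 * blocks 1 and 2, RL1 blocks 3 and 4, and so on, hence the
	 * swapped store order below. */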
	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;

.align 16
.global cast5_ecb_dec_16way
.type	cast5_ecb_dec_16way,@function;

cast5_ecb_dec_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;

.align 16
.global cast5_cbc_dec_16way
.type	cast5_cbc_dec_16way,@function;

cast5_cbc_dec_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
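	/* CBC: P(i) = D(C(i)) ^ C(i-1).  The shuffle below turns the
	 * first quadword of src into (0, C1), so the first block is
	 * xored with zero here and the caller is expected to apply the
	 * IV; every other block xors with the preceding ciphertext
	 * block, read at an 8-byte offset. */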
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;
	ret;

.align 16
.global cast5_ctr_16way
.type	cast5_ctr_16way,@function;

cast5_ctr_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;
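
	/* Build counter increments: RTMP bumps only the low lane by
	 * one, RKR advances both lanes by two; vpsubq of these negative
	 * constants acts as the addition. */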
	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX;  /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ iv */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;
	ret;