/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

.file "cast5-avx-x86_64-asm_64.S"

.extern cast5_s1
.extern cast5_s2
.extern cast5_s3
.extern cast5_s4

/* structure of crypto context */
#define km  0
#define kr  (16*4)
#define rr  ((16*4)+16)

/* s-boxes */
#define s1  cast5_s1
#define s2  cast5_s2
#define s3  cast5_s3
#define s4  cast5_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch
#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
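
/*
 * Do the four s-box lookups for the low 32 bits of src and fold them into
 * dst with op1/op2/op3 (xor/sub/add, chosen by the F-function variant).
 * Bytes are indexed through the bl/bh subregisters; interleave_op is a hook
 * (shr_next or dummy) that lets the caller shift another source register
 * down while these loads are in flight.
 */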
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        shrq $16, src; \
        movl s1(, RID1, 4), dst ## d; \
        op1 s2(, RID2, 4), dst ## d; \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        interleave_op(il_reg); \
        op2 s3(, RID1, 4), dst ## d; \
        op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16, reg;
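
/*
 * CAST5 F-function, split in two halves: F_head combines the broadcast round
 * key RKM with the input using op0 (add/xor/sub), rotates each 32-bit lane
 * left (shift left by RKRF, shift right by RKRR = 32 - RKRF, OR together)
 * and moves the four lanes into two general-purpose registers for the scalar
 * s-box lookups.  F_tail runs four lookup_32bit passes and repacks the
 * results into an xmm register.
 */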
#define F_head(a, x, gi1, gi2, op0) \
        op0 a, RKM, x; \
        vpslld RKRF, x, RTMP; \
        vpsrld RKRR, x, x; \
        vpor RTMP, x, x; \
        \
        vmovq x, gi1; \
        vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        shlq $32, RFS2; \
        orq RFS1, RFS2; \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        shlq $32, RFS1; \
        orq RFS1, RFS3; \
        \
        vmovq RFS2, x; \
        vpinsrq $1, RFS3, x, x;
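
/*
 * F_2 evaluates the F-function on two register pairs (eight blocks) at once
 * and XORs the results into a1/a2.  F1_2/F2_2/F3_2 select the three CAST5
 * F-function variants by their add/xor/sub operation patterns.
 */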
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
        \
        vpxor a1, RX, a1; \
        vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
        F ## f ## _2(a1, b1, a2, b2);
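
/*
 * One CAST5 round over all 16 blocks: broadcast the 32-bit subkey km[n] into
 * RKM, mask the low byte of RKR to 5 bits (via .Lfirst_mask/R1ST) as this
 * round's rotation count RKRF, derive RKRR = 32 - RKRF for the rotate, shift
 * RKR down one byte for the next round, then run F-function variant f on all
 * four register pairs.
 */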
#define round(l, r, n, f) \
        vbroadcastss (km+(4*n))(CTX), RKM; \
        vpand R1ST, RKR, RKRF; \
        vpsubq RKRF, R32, RKRR; \
        vpsrldq $1, RKR, RKR; \
        subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
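
/*
 * Preload the 16 per-round rotation bytes kr[] into RKR, XOR-ed with 16
 * (equivalent to adding 16 mod 32 for 5-bit rotation counts).
 * dec_preload_rkr additionally reverses the byte order so the rounds can be
 * walked backwards while round() still consumes one byte per round.
 */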
#define enc_preload_rkr() \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR; \
        vpshufb .Lbswap128_mask, RKR, RKR;
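
/*
 * Block (un)packing: transpose_2x4 is a 2x4 32-bit transpose that gathers
 * the four left halves of four 64-bit blocks into x0 and the four right
 * halves into x1 (the same transpose restores the layout on output).
 * inpack_blocks loads four blocks, byte-swaps each 32-bit word via rmask and
 * transposes; outunpack_blocks is the inverse, and outunpack_xor_blocks
 * additionally XORs the result into the data already at out before storing.
 */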
#define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq x1, x0, t0; \
        vpunpckhdq x1, x0, t1; \
        \
        vpunpcklqdq t1, t0, x0; \
        vpunpckhqdq t1, t0, x1;

#define inpack_blocks(in, x0, x1, t0, t1, rmask) \
        vmovdqu (0*4*4)(in), x0; \
        vmovdqu (1*4*4)(in), x1; \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        \
        transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(out, x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vmovdqu x0, (0*4*4)(out); \
        vmovdqu x1, (1*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpxor (0*4*4)(out), x0, x0; \
        vmovdqu x0, (0*4*4)(out); \
        vpxor (1*4*4)(out), x1, x1; \
        vmovdqu x1, (1*4*4)(out);
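
/* shuffle masks and scalar constants (16, 32, 0x1f) used by the round setup */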
.data

.align 16
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
        .byte 16, 16, 16, 16
.L32_mask:
        .byte 32, 0, 0, 0
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0

.text

.align 16
.global __cast5_enc_blk_16way
.type __cast5_enc_blk_16way,@function;

__cast5_enc_blk_16way:
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: bool, if true: xor output
         */

        pushq %rbp;
        pushq %rbx;
        pushq %rcx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        enc_preload_rkr();

        leaq 1*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
        leaq 2*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
        leaq 3*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);

        movq %rsi, %r11;

        round(RL, RR, 0, 1);
        round(RR, RL, 1, 2);
        round(RL, RR, 2, 3);
        round(RR, RL, 3, 1);
        round(RL, RR, 4, 2);
        round(RR, RL, 5, 3);
        round(RL, RR, 6, 1);
        round(RR, RL, 7, 2);
        round(RL, RR, 8, 3);
        round(RR, RL, 9, 1);
        round(RL, RR, 10, 2);
        round(RR, RL, 11, 3);
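
        /* the rr flag in the context marks reduced-round keys (short CAST5
         * keys of 80 bits or less use only 12 rounds); skip rounds 12-15 */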
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz __skip_enc;

        round(RL, RR, 12, 1);
        round(RR, RL, 13, 2);
        round(RL, RR, 14, 3);
        round(RR, RL, 15, 1);

__skip_enc:
        popq %rcx;
        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;
        leaq 1*(2*4*4)(%r11), %rax;

        testb %cl, %cl;
        jnz __enc_xor16;

        outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
        leaq 2*(2*4*4)(%r11), %rax;
        outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
        leaq 3*(2*4*4)(%r11), %rax;
        outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

        ret;

__enc_xor16:
        outunpack_xor_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
        outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
        leaq 2*(2*4*4)(%r11), %rax;
        outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
        leaq 3*(2*4*4)(%r11), %rax;
        outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

        ret;

.align 16
.global cast5_dec_blk_16way
.type cast5_dec_blk_16way,@function;

cast5_dec_blk_16way:
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        dec_preload_rkr();

        leaq 1*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
        leaq 2*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
        leaq 3*(2*4*4)(%rdx), %rax;
        inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);

        movq %rsi, %r11;
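
        /* reduced-round keys start decryption at round 11; __skip_dec also
         * drops the four unused rotation bytes from RKR */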
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz __skip_dec;

        round(RL, RR, 15, 1);
        round(RR, RL, 14, 3);
        round(RL, RR, 13, 2);
        round(RR, RL, 12, 1);

__dec_tail:
        round(RL, RR, 11, 3);
        round(RR, RL, 10, 2);
        round(RL, RR, 9, 1);
        round(RR, RL, 8, 3);
        round(RL, RR, 7, 2);
        round(RR, RL, 6, 1);
        round(RL, RR, 5, 3);
        round(RR, RL, 4, 2);
        round(RL, RR, 3, 1);
        round(RR, RL, 2, 3);
        round(RL, RR, 1, 2);
        round(RR, RL, 0, 1);

        vmovdqa .Lbswap_mask, RKM;
        popq %rbx;
        popq %rbp;

        leaq 1*(2*4*4)(%r11), %rax;
        outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
        leaq 2*(2*4*4)(%r11), %rax;
        outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
        leaq 3*(2*4*4)(%r11), %rax;
        outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

        ret;
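
        /* reduced rounds: discard the rotation bytes for rounds 15-12 so
         * __dec_tail picks up the correct rotation for round 11 */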
__skip_dec:
        vpsrldq $4, RKR, RKR;
        jmp __dec_tail;