
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

.file "cast6-avx-x86_64-asm_64.S"
.extern cast6_s1
.extern cast6_s2
.extern cast6_s3
.extern cast6_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
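/*
 * km is an array of 48 32-bit masking keys (12 quad-rounds x 4 rounds),
 * so the kr rotation-key bytes start at byte offset 12*4*4 = 192.  These
 * offsets have to match the layout of the C-side key context (Km words
 * followed by Kr bytes) whose address the glue code passes in %rdi.
 */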
/* s-boxes */
#define s1	cast6_s1
#define s2	cast6_s2
#define s3	cast6_s3
#define s4	cast6_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
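/*
 * lookup_32bit: perform the four CAST s-box lookups for one 32-bit word
 * held in a general-purpose register.  The low two bytes are indexed
 * through the bl/bh sub-registers, then the word is shifted right by 16
 * so the high two bytes can be reached the same way.  op1/op2/op3 select
 * the xor/sub/add combination of the round function type in use.
 */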
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        shrq $16, src; \
        movl s1(, RID1, 4), dst ## d; \
        op1 s2(, RID2, 4), dst ## d; \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        interleave_op(il_reg); \
        op2 s3(, RID1, 4), dst ## d; \
        op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16, reg;
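/*
 * F_head: first half of the CAST6 round function.  Combine the input word
 * with the masking key (op0 = add/xor/sub for the three round function
 * types), rotate left by the rotation key (left shift by RKRF and right
 * shift by RKRR = 32 - RKRF, or'ed together), then move the two 64-bit
 * halves out to general-purpose registers for the s-box lookups.
 */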
#define F_head(a, x, gi1, gi2, op0) \
        op0 a, RKM, x; \
        vpslld RKRF, x, RTMP; \
        vpsrld RKRR, x, x; \
        vpor RTMP, x, x; \
        \
        vmovq x, gi1; \
        vpextrq $1, x, gi2;
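/*
 * F_tail: finish the round function for both 64-bit halves.  Each 32-bit
 * word gets its s-box lookups done in lookup_32bit, the two results per
 * half are recombined into a 64-bit value, and the packed result is moved
 * back into the destination xmm register.
 */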
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        shlq $32, RFS2; \
        orq RFS1, RFS2; \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        shlq $32, RFS1; \
        orq RFS1, RFS3; \
        \
        vmovq RFS2, x; \
        vpinsrq $1, RFS3, x, x;
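/*
 * F_2 runs one round function on two groups of four blocks at once:
 * F_head/F_tail for group 1 (result in RX) and group 2 (result in RTMP),
 * then xors the results into a1/a2.  F1_2/F2_2/F3_2 instantiate the three
 * CAST6 round function types, which differ only in the add/xor/sub used
 * with the masking key and in the order the s-box results are combined.
 */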
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
        \
        vpxor a1, RX, a1; \
        vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
        F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);
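/*
 * get_round_keys: broadcast masking key Km[nn] from the context into all
 * four lanes of RKM, mask the next rotation-key byte down to 5 bits
 * (RKRF), and compute 32 - RKRF into RKRR for the right-shift half of the
 * rotate.  The byte queue of rotation keys in RKR is then shifted down by
 * one byte for the following round.
 */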
#define get_round_keys(nn) \
        vbroadcastss (km+(4*(nn)))(CTX), RKM; \
        vpand R1ST, RKR, RKRF; \
        vpsubq RKRF, R32, RKRR; \
        vpsrldq $1, RKR, RKR;
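/*
 * Q is one forward quad-round (rounds 4*n .. 4*n+3): D is mixed into C,
 * C into B, B into A and A into D, using round function types 1, 2, 3, 1.
 * QBAR is the inverse quad-round, applying the same operations in reverse
 * order; both are needed because CAST6 encryption consists of six forward
 * quad-rounds followed by six inverse quad-rounds.
 */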
#define Q(n) \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1); \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2); \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3); \
        \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1);

#define QBAR(n) \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1); \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3); \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2); \
        \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1);

#define shuffle(mask) \
        vpshufb mask, RKR, RKR;
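/*
 * preload_rkr: load the 16 rotation-key bytes used by quad-rounds 4*n ..
 * 4*n+3 into RKR.  Adding 16 (mod 32) to a 5-bit rotation amount is the
 * same as xoring it with 16, hence the vpxor against .L16_mask; the extra
 * 16-bit rotation compensates for the order in which lookup_32bit consumes
 * the rotated word (low half first via bl/bh, then the high half after a
 * 16-bit shift).  The optional shuffle reorders the key bytes for the
 * QBAR quad-rounds and for decryption.
 */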
#define preload_rkr(n, do_mask, mask) \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor (kr+n*16)(CTX), RKR, RKR; \
        do_mask(mask);
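/*
 * transpose_4x4: treat x0..x3 as the rows of a 4x4 matrix of 32-bit words
 * and transpose it in place (t0..t2 are scratch).  This converts between
 * the "one block per register" layout and the "same word of four blocks
 * per register" layout that the vectorised round functions operate on.
 */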
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        vpunpckldq x1, x0, t0; \
        vpunpckhdq x1, x0, t2; \
        vpunpckldq x3, x2, t1; \
        vpunpckhdq x3, x2, x3; \
        \
        vpunpcklqdq t1, t0, x0; \
        vpunpckhqdq t1, t0, x1; \
        vpunpcklqdq x3, t2, x2; \
        vpunpckhqdq x3, t2, x3;
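/*
 * inpack_blocks loads four 16-byte blocks, byte-swaps each 32-bit word to
 * big-endian (CAST6 is defined on big-endian words; rmask is the bswap
 * mask) and transposes them into the A/B/C/D word registers.
 * outunpack_blocks is the inverse; outunpack_xor_blocks additionally xors
 * the result with the data already at the destination (the path selected
 * by the xor flag of __cast6_enc_blk_8way).
 */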
#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2, rmask) \
        vmovdqu (0*4*4)(in), x0; \
        vmovdqu (1*4*4)(in), x1; \
        vmovdqu (2*4*4)(in), x2; \
        vmovdqu (3*4*4)(in), x3; \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpshufb rmask, x2, x2; \
        vpshufb rmask, x3, x3; \
        \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpshufb rmask, x2, x2; \
        vpshufb rmask, x3, x3; \
        vmovdqu x0, (0*4*4)(out); \
        vmovdqu x1, (1*4*4)(out); \
        vmovdqu x2, (2*4*4)(out); \
        vmovdqu x3, (3*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpshufb rmask, x2, x2; \
        vpshufb rmask, x3, x3; \
        vpxor (0*4*4)(out), x0, x0; \
        vmovdqu x0, (0*4*4)(out); \
        vpxor (1*4*4)(out), x1, x1; \
        vmovdqu x1, (1*4*4)(out); \
        vpxor (2*4*4)(out), x2, x2; \
        vmovdqu x2, (2*4*4)(out); \
        vpxor (3*4*4)(out), x3, x3; \
        vmovdqu x3, (3*4*4)(out);
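/*
 * Constants: .Lbswap_mask converts between little- and big-endian 32-bit
 * words; the .Lrkr_* masks reorder the 16 rotation-key bytes loaded by
 * preload_rkr for the Q/QBAR mix of each group of four quad-rounds (and
 * reverse them for decryption); .L16_mask, .L32_mask and .Lfirst_mask are
 * the per-round constants used by preload_rkr and get_round_keys.
 */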
.data

.align 16
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_Q_Q_QBAR_QBAR:
        .byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
        .byte 16, 16, 16, 16
.L32_mask:
        .byte 32, 0, 0, 0
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0
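/*
 * __cast6_enc_blk_8way encrypts eight 16-byte blocks: two groups of four
 * blocks are carried in RA1..RD1 and RA2..RD2 and run in parallel through
 * six forward quad-rounds and six inverse quad-rounds.  If the bool in
 * %rcx is true, the output is additionally xored over the existing
 * destination contents.
 */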
.text

.align 16
.global __cast6_enc_blk_8way
.type __cast6_enc_blk_8way,@function;

__cast6_enc_blk_8way:
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: bool, if true: xor output
         */

        pushq %rbp;
        pushq %rbx;
        pushq %rcx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        leaq (4*4*4)(%rdx), %rax;
        inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        movq %rsi, %r11;

        preload_rkr(0, dummy, none);
        Q(0);
        Q(1);
        Q(2);
        Q(3);
        preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
        Q(4);
        Q(5);
        QBAR(6);
        QBAR(7);
        preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
        QBAR(8);
        QBAR(9);
        QBAR(10);
        QBAR(11);

        popq %rcx;
        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;
        leaq (4*4*4)(%r11), %rax;

        testb %cl, %cl;
        jnz __enc_xor8;

        outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;

__enc_xor8:
        outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;
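/*
 * cast6_dec_blk_8way decrypts eight blocks by running the quad-rounds in
 * the opposite order with the rotation-key bytes reversed via the
 * .Lrkr_dec_* shuffle masks; Q and QBAR swap roles relative to encryption.
 */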
.align 16
.global cast6_dec_blk_8way
.type cast6_dec_blk_8way,@function;

cast6_dec_blk_8way:
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        leaq (4*4*4)(%rdx), %rax;
        inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        movq %rsi, %r11;

        preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
        Q(11);
        Q(10);
        Q(9);
        Q(8);
        preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
        Q(7);
        Q(6);
        QBAR(5);
        QBAR(4);
        preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
        QBAR(3);
        QBAR(2);
        QBAR(1);
        QBAR(0);

        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;
        leaq (4*4*4)(%r11), %rax;
        outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;