camellia-aesni-avx2-asm_64.S

/*
 * x86_64/AVX2/AES-NI assembler implementation of Camellia
 *
 * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/linkage.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

/* struct camellia_ctx: */
#define key_table 0
#define key_length CAMELLIA_TABLE_BYTE_LEN

/* register macros */
#define CTX %rdi
#define RIO %r8

/**********************************************************************
  helper macros
 **********************************************************************/
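/*
 * filter_8bit: apply an 8-bit -> 8-bit transform as two 4-bit table lookups.
 * The low nibble of every byte indexes lo_t and the high nibble indexes hi_t
 * (both via vpshufb); the two results are XORed together. mask4bit must hold
 * 0x0f in every byte.
 */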
#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
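
/*
 * xmm aliases for the low 128-bit halves of the ymm registers; the macros
 * below paste "##_x" onto a ymm argument name to operate on that half.
 */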
#define ymm0_x xmm0
#define ymm1_x xmm1
#define ymm2_x xmm2
#define ymm3_x xmm3
#define ymm4_x xmm4
#define ymm5_x xmm5
#define ymm6_x xmm6
#define ymm7_x xmm7
#define ymm8_x xmm8
#define ymm9_x xmm9
#define ymm10_x xmm10
#define ymm11_x xmm11
#define ymm12_x xmm12
#define ymm13_x xmm13
#define ymm14_x xmm14
#define ymm15_x xmm15

/**********************************************************************
  32-way camellia
 **********************************************************************/

/*
 * IN:
 *   x0..x7: byte-sliced AB state
 *   mem_cd: register pointer storing CD state
 *   key: index for key material
 * OUT:
 *   x0..x7: new byte-sliced CD state
 */
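/*
 * The Camellia s-boxes are computed with AES-NI: .Linv_shift_row undoes the
 * ShiftRows step that vaesenclast performs, and a zeroed round key leaves
 * only SubBytes. The pre/post filter tables (.Lpre_tf_* / .Lpost_tf_*) map
 * bytes between the Camellia and AES s-box input/output domains.
 */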
#define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
		  t7, mem_cd, key) \
	/* \
	 * S-function with AES subbytes \
	 */ \
	vbroadcasti128 .Linv_shift_row, t4; \
	vpbroadcastd .L0f0f0f0f, t7; \
	vbroadcasti128 .Lpre_tf_lo_s1, t5; \
	vbroadcasti128 .Lpre_tf_hi_s1, t6; \
	vbroadcasti128 .Lpre_tf_lo_s4, t2; \
	vbroadcasti128 .Lpre_tf_hi_s4, t3; \
	\
	/* AES inverse shift rows */ \
	vpshufb t4, x0, x0; \
	vpshufb t4, x7, x7; \
	vpshufb t4, x3, x3; \
	vpshufb t4, x6, x6; \
	vpshufb t4, x2, x2; \
	vpshufb t4, x5, x5; \
	vpshufb t4, x1, x1; \
	vpshufb t4, x4, x4; \
	\
	/* prefilter sboxes 1, 2 and 3 */ \
	/* prefilter sbox 4 */ \
	filter_8bit(x0, t5, t6, t7, t4); \
	filter_8bit(x7, t5, t6, t7, t4); \
	vextracti128 $1, x0, t0##_x; \
	vextracti128 $1, x7, t1##_x; \
	filter_8bit(x3, t2, t3, t7, t4); \
	filter_8bit(x6, t2, t3, t7, t4); \
	vextracti128 $1, x3, t3##_x; \
	vextracti128 $1, x6, t2##_x; \
	filter_8bit(x2, t5, t6, t7, t4); \
	filter_8bit(x5, t5, t6, t7, t4); \
	filter_8bit(x1, t5, t6, t7, t4); \
	filter_8bit(x4, t5, t6, t7, t4); \
	\
	vpxor t4##_x, t4##_x, t4##_x; \
	\
	/* AES subbytes + AES shift rows */ \
	vextracti128 $1, x2, t6##_x; \
	vextracti128 $1, x5, t5##_x; \
	vaesenclast t4##_x, x0##_x, x0##_x; \
	vaesenclast t4##_x, t0##_x, t0##_x; \
	vinserti128 $1, t0##_x, x0, x0; \
	vaesenclast t4##_x, x7##_x, x7##_x; \
	vaesenclast t4##_x, t1##_x, t1##_x; \
	vinserti128 $1, t1##_x, x7, x7; \
	vaesenclast t4##_x, x3##_x, x3##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x3, x3; \
	vaesenclast t4##_x, x6##_x, x6##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x6, x6; \
	vextracti128 $1, x1, t3##_x; \
	vextracti128 $1, x4, t2##_x; \
	vbroadcasti128 .Lpost_tf_lo_s1, t0; \
	vbroadcasti128 .Lpost_tf_hi_s1, t1; \
	vaesenclast t4##_x, x2##_x, x2##_x; \
	vaesenclast t4##_x, t6##_x, t6##_x; \
	vinserti128 $1, t6##_x, x2, x2; \
	vaesenclast t4##_x, x5##_x, x5##_x; \
	vaesenclast t4##_x, t5##_x, t5##_x; \
	vinserti128 $1, t5##_x, x5, x5; \
	vaesenclast t4##_x, x1##_x, x1##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x1, x1; \
	vaesenclast t4##_x, x4##_x, x4##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x4, x4; \
	\
	/* postfilter sboxes 1 and 4 */ \
	vbroadcasti128 .Lpost_tf_lo_s3, t2; \
	vbroadcasti128 .Lpost_tf_hi_s3, t3; \
	filter_8bit(x0, t0, t1, t7, t6); \
	filter_8bit(x7, t0, t1, t7, t6); \
	filter_8bit(x3, t0, t1, t7, t6); \
	filter_8bit(x6, t0, t1, t7, t6); \
	\
	/* postfilter sbox 3 */ \
	vbroadcasti128 .Lpost_tf_lo_s2, t4; \
	vbroadcasti128 .Lpost_tf_hi_s2, t5; \
	filter_8bit(x2, t2, t3, t7, t6); \
	filter_8bit(x5, t2, t3, t7, t6); \
	\
	vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \
	\
	/* postfilter sbox 2 */ \
	filter_8bit(x1, t4, t5, t7, t2); \
	filter_8bit(x4, t4, t5, t7, t2); \
	vpxor t7, t7, t7; \
	\
	vpsrldq $1, t0, t1; \
	vpsrldq $2, t0, t2; \
	vpshufb t7, t1, t1; \
	vpsrldq $3, t0, t3; \
	\
	/* P-function */ \
	vpxor x5, x0, x0; \
	vpxor x6, x1, x1; \
	vpxor x7, x2, x2; \
	vpxor x4, x3, x3; \
	\
	vpshufb t7, t2, t2; \
	vpsrldq $4, t0, t4; \
	vpshufb t7, t3, t3; \
	vpsrldq $5, t0, t5; \
	vpshufb t7, t4, t4; \
	\
	vpxor x2, x4, x4; \
	vpxor x3, x5, x5; \
	vpxor x0, x6, x6; \
	vpxor x1, x7, x7; \
	\
	vpsrldq $6, t0, t6; \
	vpshufb t7, t5, t5; \
	vpshufb t7, t6, t6; \
	\
	vpxor x7, x0, x0; \
	vpxor x4, x1, x1; \
	vpxor x5, x2, x2; \
	vpxor x6, x3, x3; \
	\
	vpxor x3, x4, x4; \
	vpxor x0, x5, x5; \
	vpxor x1, x6, x6; \
	vpxor x2, x7, x7; /* note: high and low parts swapped */ \
	\
	/* Add key material and result to CD (x becomes new CD) */ \
	\
	vpxor t6, x1, x1; \
	vpxor 5 * 32(mem_cd), x1, x1; \
	\
	vpsrldq $7, t0, t6; \
	vpshufb t7, t0, t0; \
	vpshufb t7, t6, t7; \
	\
	vpxor t7, x0, x0; \
	vpxor 4 * 32(mem_cd), x0, x0; \
	\
	vpxor t5, x2, x2; \
	vpxor 6 * 32(mem_cd), x2, x2; \
	\
	vpxor t4, x3, x3; \
	vpxor 7 * 32(mem_cd), x3, x3; \
	\
	vpxor t3, x4, x4; \
	vpxor 0 * 32(mem_cd), x4, x4; \
	\
	vpxor t2, x5, x5; \
	vpxor 1 * 32(mem_cd), x5, x5; \
	\
	vpxor t1, x6, x6; \
	vpxor 2 * 32(mem_cd), x6, x6; \
	\
	vpxor t0, x7, x7; \
	vpxor 3 * 32(mem_cd), x7, x7;
/*
 * Size optimization... with inlined roundsm32 the binary would be over 5
 * times larger and only marginally faster.
 */
.align 8
roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
		  %rcx, (%r9));
	ret;
ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

.align 8
roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
		  %rax, (%r9));
	ret;
ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
 * IN/OUT:
 *   x0..x7: byte-sliced AB state preloaded
 *   mem_ab: byte-sliced AB state in memory
 *   mem_cd: byte-sliced CD state in memory
 */
#define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
	leaq (key_table + (i) * 8)(CTX), %r9; \
	call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
	\
	vmovdqu x0, 4 * 32(mem_cd); \
	vmovdqu x1, 5 * 32(mem_cd); \
	vmovdqu x2, 6 * 32(mem_cd); \
	vmovdqu x3, 7 * 32(mem_cd); \
	vmovdqu x4, 0 * 32(mem_cd); \
	vmovdqu x5, 1 * 32(mem_cd); \
	vmovdqu x6, 2 * 32(mem_cd); \
	vmovdqu x7, 3 * 32(mem_cd); \
	\
	leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
	call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
	\
	store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);

#define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */

#define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \
	/* Store new AB state */ \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab);

#define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store);

#define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store);
/*
 * IN:
 *   v0..3: byte-sliced 32-bit integers
 * OUT:
 *   v0..3: (IN <<< 1)
 */
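/*
 * Rotation is done per byte slice: vpcmpgtb against zero yields 0xff for
 * every byte whose top bit is set, vpabsb turns that into 0x01, vpaddb
 * shifts each byte left by one, and the carries are OR'ed into the next
 * byte slice (v0 -> v1 -> v2 -> v3, with v3's carry wrapping around to v0).
 */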
#define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \
	vpcmpgtb v0, zero, t0; \
	vpaddb v0, v0, v0; \
	vpabsb t0, t0; \
	\
	vpcmpgtb v1, zero, t1; \
	vpaddb v1, v1, v1; \
	vpabsb t1, t1; \
	\
	vpcmpgtb v2, zero, t2; \
	vpaddb v2, v2, v2; \
	vpabsb t2, t2; \
	\
	vpor t0, v1, v1; \
	\
	vpcmpgtb v3, zero, t0; \
	vpaddb v3, v3, v3; \
	vpabsb t0, t0; \
	\
	vpor t1, v2, v2; \
	vpor t2, v3, v3; \
	vpor t0, v0, v0;
/*
 * IN:
 *   l: byte-sliced AB state in memory
 *   r: byte-sliced CD state in memory
 * OUT:
 *   x0..x7: new byte-sliced CD state
 */
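/*
 * fls32 applies the Camellia FL/FL^-1 layer to the byte-sliced halves. Each
 * 32-bit subkey (kll/klr/krl/krr) is split into its four bytes with vpsrldq
 * and broadcast across a whole register with vpshufb against a zeroed
 * register, so the scalar expressions in the comments below run on all 32
 * blocks at once.
 */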
#define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \
	      tt1, tt2, tt3, kll, klr, krl, krr) \
	/* \
	 * t0 = kll; \
	 * t0 &= ll; \
	 * lr ^= rol32(t0, 1); \
	 */ \
	vpbroadcastd kll, t0; /* only lowest 32-bit used */ \
	vpxor tt0, tt0, tt0; \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand l0, t0, t0; \
	vpand l1, t1, t1; \
	vpand l2, t2, t2; \
	vpand l3, t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor l4, t0, l4; \
	vpbroadcastd krr, t0; /* only lowest 32-bit used */ \
	vmovdqu l4, 4 * 32(l); \
	vpxor l5, t1, l5; \
	vmovdqu l5, 5 * 32(l); \
	vpxor l6, t2, l6; \
	vmovdqu l6, 6 * 32(l); \
	vpxor l7, t3, l7; \
	vmovdqu l7, 7 * 32(l); \
	\
	/* \
	 * t2 = krr; \
	 * t2 |= rr; \
	 * rl ^= t2; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor 4 * 32(r), t0, t0; \
	vpor 5 * 32(r), t1, t1; \
	vpor 6 * 32(r), t2, t2; \
	vpor 7 * 32(r), t3, t3; \
	\
	vpxor 0 * 32(r), t0, t0; \
	vpxor 1 * 32(r), t1, t1; \
	vpxor 2 * 32(r), t2, t2; \
	vpxor 3 * 32(r), t3, t3; \
	vmovdqu t0, 0 * 32(r); \
	vpbroadcastd krl, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 1 * 32(r); \
	vmovdqu t2, 2 * 32(r); \
	vmovdqu t3, 3 * 32(r); \
	\
	/* \
	 * t2 = krl; \
	 * t2 &= rl; \
	 * rr ^= rol32(t2, 1); \
	 */ \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand 0 * 32(r), t0, t0; \
	vpand 1 * 32(r), t1, t1; \
	vpand 2 * 32(r), t2, t2; \
	vpand 3 * 32(r), t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor 4 * 32(r), t0, t0; \
	vpxor 5 * 32(r), t1, t1; \
	vpxor 6 * 32(r), t2, t2; \
	vpxor 7 * 32(r), t3, t3; \
	vmovdqu t0, 4 * 32(r); \
	vpbroadcastd klr, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 5 * 32(r); \
	vmovdqu t2, 6 * 32(r); \
	vmovdqu t3, 7 * 32(r); \
	\
	/* \
	 * t0 = klr; \
	 * t0 |= lr; \
	 * ll ^= t0; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor l4, t0, t0; \
	vpor l5, t1, t1; \
	vpor l6, t2, t2; \
	vpor l7, t3, t3; \
	\
	vpxor l0, t0, l0; \
	vmovdqu l0, 0 * 32(l); \
	vpxor l1, t1, l1; \
	vmovdqu l1, 1 * 32(l); \
	vpxor l2, t2, l2; \
	vmovdqu l2, 2 * 32(l); \
	vpxor l3, t3, l3; \
	vmovdqu l3, 3 * 32(l);
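
/*
 * transpose_4x4: transpose a 4x4 matrix of 32-bit words held in x0..x3
 * (independently within each 128-bit lane). t1/t2 are clobbered.
 */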
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;
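
/*
 * byteslice_16x16b_fast: transpose the 16x16 byte matrix held in the sixteen
 * input registers (per 128-bit lane) so that each output register ends up
 * holding one byte position from every block. st0/st1 are two 32-byte memory
 * scratch slots.
 */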
#define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \
			      a3, b3, c3, d3, st0, st1) \
	vmovdqu d2, st0; \
	vmovdqu d3, st1; \
	transpose_4x4(a0, a1, a2, a3, d2, d3); \
	transpose_4x4(b0, b1, b2, b3, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu a0, st0; \
	vmovdqu a1, st1; \
	transpose_4x4(c0, c1, c2, c3, a0, a1); \
	transpose_4x4(d0, d1, d2, d3, a0, a1); \
	\
	vbroadcasti128 .Lshufb_16x16b, a0; \
	vmovdqu st1, a1; \
	vpshufb a0, a2, a2; \
	vpshufb a0, a3, a3; \
	vpshufb a0, b0, b0; \
	vpshufb a0, b1, b1; \
	vpshufb a0, b2, b2; \
	vpshufb a0, b3, b3; \
	vpshufb a0, a1, a1; \
	vpshufb a0, c0, c0; \
	vpshufb a0, c1, c1; \
	vpshufb a0, c2, c2; \
	vpshufb a0, c3, c3; \
	vpshufb a0, d0, d0; \
	vpshufb a0, d1, d1; \
	vpshufb a0, d2, d2; \
	vpshufb a0, d3, d3; \
	vmovdqu d3, st1; \
	vmovdqu st0, d3; \
	vpshufb a0, d3, a0; \
	vmovdqu d2, st0; \
	\
	transpose_4x4(a0, b0, c0, d0, d2, d3); \
	transpose_4x4(a1, b1, c1, d1, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu b0, st0; \
	vmovdqu b1, st1; \
	transpose_4x4(a2, b2, c2, d2, b0, b1); \
	transpose_4x4(a3, b3, c3, d3, b0, b1); \
	vmovdqu st0, b0; \
	vmovdqu st1, b1; \
	/* does not adjust output bytes inside vectors */
/* load blocks to registers and apply pre-whitening */
#define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio, key) \
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap, x0, x0; \
	\
	vpxor 0 * 32(rio), x0, y7; \
	vpxor 1 * 32(rio), x0, y6; \
	vpxor 2 * 32(rio), x0, y5; \
	vpxor 3 * 32(rio), x0, y4; \
	vpxor 4 * 32(rio), x0, y3; \
	vpxor 5 * 32(rio), x0, y2; \
	vpxor 6 * 32(rio), x0, y1; \
	vpxor 7 * 32(rio), x0, y0; \
	vpxor 8 * 32(rio), x0, x7; \
	vpxor 9 * 32(rio), x0, x6; \
	vpxor 10 * 32(rio), x0, x5; \
	vpxor 11 * 32(rio), x0, x4; \
	vpxor 12 * 32(rio), x0, x3; \
	vpxor 13 * 32(rio), x0, x2; \
	vpxor 14 * 32(rio), x0, x1; \
	vpxor 15 * 32(rio), x0, x0;

/* byteslice pre-whitened blocks and store to temporary memory */
#define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd) \
	byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \
			      y4, y5, y6, y7, (mem_ab), (mem_cd)); \
	\
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab); \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu y0, 0 * 32(mem_cd); \
	vmovdqu y1, 1 * 32(mem_cd); \
	vmovdqu y2, 2 * 32(mem_cd); \
	vmovdqu y3, 3 * 32(mem_cd); \
	vmovdqu y4, 4 * 32(mem_cd); \
	vmovdqu y5, 5 * 32(mem_cd); \
	vmovdqu y6, 6 * 32(mem_cd); \
	vmovdqu y7, 7 * 32(mem_cd);

/* de-byteslice, apply post-whitening and store blocks */
#define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
		    y5, y6, y7, key, stack_tmp0, stack_tmp1) \
	byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \
			      y3, y7, x3, x7, stack_tmp0, stack_tmp1); \
	\
	vmovdqu x0, stack_tmp0; \
	\
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap, x0, x0; \
	\
	vpxor x0, y7, y7; \
	vpxor x0, y6, y6; \
	vpxor x0, y5, y5; \
	vpxor x0, y4, y4; \
	vpxor x0, y3, y3; \
	vpxor x0, y2, y2; \
	vpxor x0, y1, y1; \
	vpxor x0, y0, y0; \
	vpxor x0, x7, x7; \
	vpxor x0, x6, x6; \
	vpxor x0, x5, x5; \
	vpxor x0, x4, x4; \
	vpxor x0, x3, x3; \
	vpxor x0, x2, x2; \
	vpxor x0, x1, x1; \
	vpxor stack_tmp0, x0, x0;
#define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio) \
	vmovdqu x0, 0 * 32(rio); \
	vmovdqu x1, 1 * 32(rio); \
	vmovdqu x2, 2 * 32(rio); \
	vmovdqu x3, 3 * 32(rio); \
	vmovdqu x4, 4 * 32(rio); \
	vmovdqu x5, 5 * 32(rio); \
	vmovdqu x6, 6 * 32(rio); \
	vmovdqu x7, 7 * 32(rio); \
	vmovdqu y0, 8 * 32(rio); \
	vmovdqu y1, 9 * 32(rio); \
	vmovdqu y2, 10 * 32(rio); \
	vmovdqu y3, 11 * 32(rio); \
	vmovdqu y4, 12 * 32(rio); \
	vmovdqu y5, 13 * 32(rio); \
	vmovdqu y6, 14 * 32(rio); \
	vmovdqu y7, 15 * 32(rio);
.data
.align 32

#define SHUFB_BYTES(idx) \
	0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)

.Lshufb_16x16b:
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)

.Lpack_bswap:
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For XTS mode */
.Lxts_gf128mul_and_shl1_mask_0:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lxts_gf128mul_and_shl1_mask_1:
	.byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox1, sbox2, sbox3:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s1:
	.byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86
	.byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88
.Lpre_tf_hi_s1:
	.byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a
	.byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23

/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox4:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in <<< 1)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s4:
	.byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25
	.byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74
.Lpre_tf_hi_s4:
	.byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72
	.byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf

/*
 * post-SubByte transform
 *
 * post-lookup for sbox1, sbox4:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s1:
	.byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31
	.byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1
.Lpost_tf_hi_s1:
	.byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8
	.byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c

/*
 * post-SubByte transform
 *
 * post-lookup for sbox2:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   ) <<< 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s2:
	.byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62
	.byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3
.Lpost_tf_hi_s2:
	.byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51
	.byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18

/*
 * post-SubByte transform
 *
 * post-lookup for sbox3:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   ) >>> 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s3:
	.byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98
	.byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8
.Lpost_tf_hi_s3:
	.byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54
	.byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f
.text

.align 8
__camellia_enc_blk32:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%ymm0..%ymm15: 32 plaintext blocks
	 * output:
	 *	%ymm0..%ymm15: 32 encrypted blocks, order swapped:
	 *	7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
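	/*
	 * The 512-byte scratch area holds the byte-sliced state: the AB half
	 * at (%rax) and the CD half at 8 * 32(%rax), pointed to by %rcx.
	 */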
	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX),
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX),
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	movl $24, %r8d;
	cmpl $16, key_length(CTX);
	jne .Lenc_max32;

.Lenc_done:
	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));

	ret;

.align 8
.Lenc_max32:
	movl $32, %r8d;

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX),
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	jmp .Lenc_done;
ENDPROC(__camellia_enc_blk32)
.align 8
__camellia_dec_blk32:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%r8d: 24 for 16-byte key, 32 for larger
	 *	%ymm0..%ymm15: 32 encrypted blocks
	 * output:
	 *	%ymm0..%ymm15: 32 plaintext blocks, order swapped:
	 *	7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	cmpl $32, %r8d;
	je .Ldec_max32;

.Ldec_max24:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX),
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX),
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));

	ret;

.align 8
.Ldec_max32:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX),
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX));

	jmp .Ldec_max24;
ENDPROC(__camellia_dec_blk32)
ENTRY(camellia_ecb_enc_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */

	vzeroupper;

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_enc_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	ret;
ENDPROC(camellia_ecb_enc_32way)
ENTRY(camellia_ecb_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_dec_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	ret;
ENDPROC(camellia_ecb_dec_32way)
ENTRY(camellia_cbc_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	movq %rsp, %r10;
	cmpq %rsi, %rdx;
	je .Lcbc_dec_use_stack;

	/* dst can be used as temporary storage, src is not overwritten. */
	movq %rsi, %rax;
	jmp .Lcbc_dec_continue;

.Lcbc_dec_use_stack:
	/*
	 * dst still in-use (because dst == src), so use stack for temporary
	 * storage.
	 */
	subq $(16 * 32), %rsp;
	movq %rsp, %rax;

.Lcbc_dec_continue:
	call __camellia_dec_blk32;
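
	/*
	 * Chain the blocks: block n is XORed with ciphertext block n - 1.
	 * Block 0 is left untouched here (the caller applies the IV); the
	 * vinserti128 below builds [ 0 | C0 ] so that only the upper block
	 * of %ymm7 picks up ciphertext block 0.
	 */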
	vmovdqu %ymm7, (%rax);
	vpxor %ymm7, %ymm7, %ymm7;
	vinserti128 $1, (%rdx), %ymm7, %ymm7;
	vpxor (%rax), %ymm7, %ymm7;
	movq %r10, %rsp;
	vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6;
	vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5;
	vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4;
	vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3;
	vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2;
	vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1;
	vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0;
	vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15;
	vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14;
	vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13;
	vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12;
	vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11;
	vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10;
	vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9;
	vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	ret;
ENDPROC(camellia_cbc_dec_32way)
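
/*
 * 128-bit little-endian increment helpers for CTR mode. minus_one holds
 * {-1, 0} in each 128-bit lane, so vpsubq adds 1 (or 2) to the low qword;
 * vpcmpeqq detects a low qword that is about to wrap and vpslldq/vpsubq
 * propagate the carry into the high qword.
 */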
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \
	vpcmpeqq minus_one, x, tmp1; \
	vpcmpeqq minus_two, x, tmp2; \
	vpsubq minus_two, x, x; \
	vpor tmp2, tmp1, tmp1; \
	vpslldq $8, tmp1, tmp1; \
	vpsubq tmp1, x, x;
ENTRY(camellia_ctr_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 *	%rcx: iv (little endian, 128bit)
	 */

	vzeroupper;

	movq %rsp, %r10;
	cmpq %rsi, %rdx;
	je .Lctr_use_stack;

	/* dst can be used as temporary storage, src is not overwritten. */
	movq %rsi, %rax;
	jmp .Lctr_continue;

.Lctr_use_stack:
	subq $(16 * 32), %rsp;
	movq %rsp, %rax;

.Lctr_continue:
	vpcmpeqd %ymm15, %ymm15, %ymm15;
	vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */
	vpaddq %ymm15, %ymm15, %ymm12; /* ab: -2:0 ; cd: -2:0 */

	/* load IV and byteswap */
	vmovdqu (%rcx), %xmm0;
	vmovdqa %xmm0, %xmm1;
	inc_le128(%xmm0, %xmm15, %xmm14);
	vbroadcasti128 .Lbswap128_mask, %ymm14;
	vinserti128 $1, %xmm0, %ymm1, %ymm0;
	vpshufb %ymm14, %ymm0, %ymm13;
	vmovdqu %ymm13, 15 * 32(%rax);

	/* construct IVs */
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); /* ab:le2 ; cd:le3 */
	vpshufb %ymm14, %ymm0, %ymm13;
	vmovdqu %ymm13, 14 * 32(%rax);
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm13;
	vmovdqu %ymm13, 13 * 32(%rax);
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm13;
	vmovdqu %ymm13, 12 * 32(%rax);
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm13;
	vmovdqu %ymm13, 11 * 32(%rax);
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm10;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm9;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm8;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm7;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm6;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm5;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm4;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm3;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm2;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vpshufb %ymm14, %ymm0, %ymm1;
	add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
	vextracti128 $1, %ymm0, %xmm13;
	vpshufb %ymm14, %ymm0, %ymm0;
	inc_le128(%xmm13, %xmm15, %xmm14);
	vmovdqu %xmm13, (%rcx);

	/* inpack32_pre: */
	vpbroadcastq (key_table)(CTX), %ymm15;
	vpshufb .Lpack_bswap, %ymm15, %ymm15;
	vpxor %ymm0, %ymm15, %ymm0;
	vpxor %ymm1, %ymm15, %ymm1;
	vpxor %ymm2, %ymm15, %ymm2;
	vpxor %ymm3, %ymm15, %ymm3;
	vpxor %ymm4, %ymm15, %ymm4;
	vpxor %ymm5, %ymm15, %ymm5;
	vpxor %ymm6, %ymm15, %ymm6;
	vpxor %ymm7, %ymm15, %ymm7;
	vpxor %ymm8, %ymm15, %ymm8;
	vpxor %ymm9, %ymm15, %ymm9;
	vpxor %ymm10, %ymm15, %ymm10;
	vpxor 11 * 32(%rax), %ymm15, %ymm11;
	vpxor 12 * 32(%rax), %ymm15, %ymm12;
	vpxor 13 * 32(%rax), %ymm15, %ymm13;
	vpxor 14 * 32(%rax), %ymm15, %ymm14;
	vpxor 15 * 32(%rax), %ymm15, %ymm15;

	call __camellia_enc_blk32;

	movq %r10, %rsp;

	vpxor 0 * 32(%rdx), %ymm7, %ymm7;
	vpxor 1 * 32(%rdx), %ymm6, %ymm6;
	vpxor 2 * 32(%rdx), %ymm5, %ymm5;
	vpxor 3 * 32(%rdx), %ymm4, %ymm4;
	vpxor 4 * 32(%rdx), %ymm3, %ymm3;
	vpxor 5 * 32(%rdx), %ymm2, %ymm2;
	vpxor 6 * 32(%rdx), %ymm1, %ymm1;
	vpxor 7 * 32(%rdx), %ymm0, %ymm0;
	vpxor 8 * 32(%rdx), %ymm15, %ymm15;
	vpxor 9 * 32(%rdx), %ymm14, %ymm14;
	vpxor 10 * 32(%rdx), %ymm13, %ymm13;
	vpxor 11 * 32(%rdx), %ymm12, %ymm12;
	vpxor 12 * 32(%rdx), %ymm11, %ymm11;
	vpxor 13 * 32(%rdx), %ymm10, %ymm10;
	vpxor 14 * 32(%rdx), %ymm9, %ymm9;
	vpxor 15 * 32(%rdx), %ymm8, %ymm8;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	ret;
ENDPROC(camellia_ctr_32way)
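
/*
 * Multiply the 128-bit XTS tweak(s) by x (and by x^2) in GF(2^128). The
 * qwords are shifted left with vpaddq/vpsllq; vpsrad/vpshufd build a mask
 * from the carried-out top bits, which the .Lxts_gf128mul_and_shl1_mask
 * constants turn into the 0x87 reduction and the bit-63 -> bit-64 carry.
 */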
#define gf128mul_x_ble(iv, mask, tmp) \
	vpsrad $31, iv, tmp; \
	vpaddq iv, iv, iv; \
	vpshufd $0x13, tmp, tmp; \
	vpand mask, tmp, tmp; \
	vpxor tmp, iv, iv;

#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \
	vpsrad $31, iv, tmp0; \
	vpaddq iv, iv, tmp1; \
	vpsllq $2, iv, iv; \
	vpshufd $0x13, tmp0, tmp0; \
	vpsrad $31, tmp1, tmp1; \
	vpand mask2, tmp0, tmp0; \
	vpshufd $0x13, tmp1, tmp1; \
	vpxor tmp0, iv, iv; \
	vpand mask1, tmp1, tmp1; \
	vpxor tmp1, iv, iv;
.align 8
camellia_xts_crypt_32way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 *	%r8: index for input whitening key
	 *	%r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32
	 */
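	/*
	 * The tweaks are generated on the fly and parked in the dst buffer
	 * (%rsi); the tweak-XORed input goes to registers and the stack
	 * scratch area. After the cipher call the output is XORed with the
	 * stored tweaks again and then overwrites dst.
	 */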
	vzeroupper;

	subq $(16 * 32), %rsp;
	movq %rsp, %rax;

	vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_0, %ymm12;

	/* load IV and construct second IV */
	vmovdqu (%rcx), %xmm0;
	vmovdqa %xmm0, %xmm15;
	gf128mul_x_ble(%xmm0, %xmm12, %xmm13);
	vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_1, %ymm13;
	vinserti128 $1, %xmm0, %ymm15, %ymm0;
	vpxor 0 * 32(%rdx), %ymm0, %ymm15;
	vmovdqu %ymm15, 15 * 32(%rax);
	vmovdqu %ymm0, 0 * 32(%rsi);

	/* construct IVs */
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 1 * 32(%rdx), %ymm0, %ymm15;
	vmovdqu %ymm15, 14 * 32(%rax);
	vmovdqu %ymm0, 1 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 2 * 32(%rdx), %ymm0, %ymm15;
	vmovdqu %ymm15, 13 * 32(%rax);
	vmovdqu %ymm0, 2 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 3 * 32(%rdx), %ymm0, %ymm15;
	vmovdqu %ymm15, 12 * 32(%rax);
	vmovdqu %ymm0, 3 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 4 * 32(%rdx), %ymm0, %ymm11;
	vmovdqu %ymm0, 4 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 5 * 32(%rdx), %ymm0, %ymm10;
	vmovdqu %ymm0, 5 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 6 * 32(%rdx), %ymm0, %ymm9;
	vmovdqu %ymm0, 6 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 7 * 32(%rdx), %ymm0, %ymm8;
	vmovdqu %ymm0, 7 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 8 * 32(%rdx), %ymm0, %ymm7;
	vmovdqu %ymm0, 8 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 9 * 32(%rdx), %ymm0, %ymm6;
	vmovdqu %ymm0, 9 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 10 * 32(%rdx), %ymm0, %ymm5;
	vmovdqu %ymm0, 10 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 11 * 32(%rdx), %ymm0, %ymm4;
	vmovdqu %ymm0, 11 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 12 * 32(%rdx), %ymm0, %ymm3;
	vmovdqu %ymm0, 12 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 13 * 32(%rdx), %ymm0, %ymm2;
	vmovdqu %ymm0, 13 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 14 * 32(%rdx), %ymm0, %ymm1;
	vmovdqu %ymm0, 14 * 32(%rsi);
	gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
	vpxor 15 * 32(%rdx), %ymm0, %ymm15;
	vmovdqu %ymm15, 0 * 32(%rax);
	vmovdqu %ymm0, 15 * 32(%rsi);

	vextracti128 $1, %ymm0, %xmm0;
	gf128mul_x_ble(%xmm0, %xmm12, %xmm15);
	vmovdqu %xmm0, (%rcx);

	/* inpack32_pre: */
	vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15;
	vpshufb .Lpack_bswap, %ymm15, %ymm15;
	vpxor 0 * 32(%rax), %ymm15, %ymm0;
	vpxor %ymm1, %ymm15, %ymm1;
	vpxor %ymm2, %ymm15, %ymm2;
	vpxor %ymm3, %ymm15, %ymm3;
	vpxor %ymm4, %ymm15, %ymm4;
	vpxor %ymm5, %ymm15, %ymm5;
	vpxor %ymm6, %ymm15, %ymm6;
	vpxor %ymm7, %ymm15, %ymm7;
	vpxor %ymm8, %ymm15, %ymm8;
	vpxor %ymm9, %ymm15, %ymm9;
	vpxor %ymm10, %ymm15, %ymm10;
	vpxor %ymm11, %ymm15, %ymm11;
	vpxor 12 * 32(%rax), %ymm15, %ymm12;
	vpxor 13 * 32(%rax), %ymm15, %ymm13;
	vpxor 14 * 32(%rax), %ymm15, %ymm14;
	vpxor 15 * 32(%rax), %ymm15, %ymm15;

	call *%r9;

	addq $(16 * 32), %rsp;

	vpxor 0 * 32(%rsi), %ymm7, %ymm7;
	vpxor 1 * 32(%rsi), %ymm6, %ymm6;
	vpxor 2 * 32(%rsi), %ymm5, %ymm5;
	vpxor 3 * 32(%rsi), %ymm4, %ymm4;
	vpxor 4 * 32(%rsi), %ymm3, %ymm3;
	vpxor 5 * 32(%rsi), %ymm2, %ymm2;
	vpxor 6 * 32(%rsi), %ymm1, %ymm1;
	vpxor 7 * 32(%rsi), %ymm0, %ymm0;
	vpxor 8 * 32(%rsi), %ymm15, %ymm15;
	vpxor 9 * 32(%rsi), %ymm14, %ymm14;
	vpxor 10 * 32(%rsi), %ymm13, %ymm13;
	vpxor 11 * 32(%rsi), %ymm12, %ymm12;
	vpxor 12 * 32(%rsi), %ymm11, %ymm11;
	vpxor 13 * 32(%rsi), %ymm10, %ymm10;
	vpxor 14 * 32(%rsi), %ymm9, %ymm9;
	vpxor 15 * 32(%rsi), %ymm8, %ymm8;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	ret;
ENDPROC(camellia_xts_crypt_32way)
ENTRY(camellia_xts_enc_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */

	xorl %r8d, %r8d; /* input whitening key, 0 for enc */

	leaq __camellia_enc_blk32, %r9;

	jmp camellia_xts_crypt_32way;
ENDPROC(camellia_xts_enc_32way)

ENTRY(camellia_xts_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* input whitening key, last for dec */

	leaq __camellia_dec_blk32, %r9;

	jmp camellia_xts_crypt_32way;
ENDPROC(camellia_xts_dec_32way)