serpent_avx_glue.c (17 KB)
  1. /*
  2. * Glue Code for AVX assembler versions of Serpent Cipher
  3. *
  4. * Copyright (C) 2012 Johannes Goetzfried
  5. * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  6. *
  7. * Glue code based on serpent_sse2_glue.c by:
  8. * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  23. * USA
  24. *
  25. */
  26. #include <linux/module.h>
  27. #include <linux/hardirq.h>
  28. #include <linux/types.h>
  29. #include <linux/crypto.h>
  30. #include <linux/err.h>
  31. #include <crypto/algapi.h>
  32. #include <crypto/serpent.h>
  33. #include <crypto/cryptd.h>
  34. #include <crypto/b128ops.h>
  35. #include <crypto/ctr.h>
  36. #include <crypto/lrw.h>
  37. #include <crypto/xts.h>
  38. #include <asm/xcr.h>
  39. #include <asm/xsave.h>
  40. #include <asm/crypto/serpent-avx.h>
  41. #include <asm/crypto/ablk_helper.h>
  42. #include <asm/crypto/glue_helper.h>
  43. static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
  44. {
  45. u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
  46. unsigned int j;
  47. for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
  48. ivs[j] = src[j];
  49. serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
  50. for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
  51. u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
  52. }
  53. static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
  54. {
  55. be128 ctrblk;
  56. u128_to_be128(&ctrblk, iv);
  57. u128_inc(iv);
  58. __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
  59. u128_xor(dst, src, (u128 *)&ctrblk);
  60. }
  61. static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
  62. u128 *iv)
  63. {
  64. be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
  65. unsigned int i;
  66. for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
  67. if (dst != src)
  68. dst[i] = src[i];
  69. u128_to_be128(&ctrblks[i], iv);
  70. u128_inc(iv);
  71. }
  72. serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
  73. }
/*
 * ECB encryption dispatch table: the glue walker uses the AVX x-way routine
 * while at least SERPENT_PARALLEL_BLOCKS blocks remain, then falls back to
 * the generic single-block implementation for the tail.
 */
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};
/*
 * CTR dispatch table: x-way keystream generation for full batches,
 * single-block serpent_crypt_ctr() for the remainder.
 */
static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};
/*
 * ECB decryption dispatch table: AVX x-way for full batches, generic C
 * single-block decrypt for the tail.
 */
static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};
/*
 * CBC decryption dispatch table: serpent_decrypt_cbc_xway() handles the
 * chaining XORs for full batches; single blocks use the plain decrypt
 * (the glue code applies the IV XOR itself).
 */
static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};
/* ECB encryption entry point: hand off to the shared 128-bit glue walker. */
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}
/* ECB decryption entry point: hand off to the shared 128-bit glue walker. */
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}
/*
 * CBC encryption entry point.  Encryption is inherently serial (each block
 * chains on the previous ciphertext), so only the single-block primitive
 * is used here — no x-way dispatch table.
 */
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				       dst, src, nbytes);
}
/* CBC decryption entry point: decryption parallelizes, use the x-way table. */
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}
/* CTR entry point: same routine serves encrypt and decrypt (CTR is symmetric). */
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}
/*
 * Open (or keep open) a kernel FPU section when nbytes covers at least
 * SERPENT_PARALLEL_BLOCKS blocks; returns the updated fpu_enabled state.
 */
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}
/* Close the FPU section if serpent_fpu_begin() opened one. */
static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}
/*
 * Per-request state handed to the LRW/XTS crypt callbacks: the serpent key
 * schedule plus a lazily maintained "FPU section open" flag that the
 * callbacks update via serpent_fpu_begin().
 */
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};
  158. static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
  159. {
  160. const unsigned int bsize = SERPENT_BLOCK_SIZE;
  161. struct crypt_priv *ctx = priv;
  162. int i;
  163. ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
  164. if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
  165. serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
  166. return;
  167. }
  168. for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
  169. __serpent_encrypt(ctx->ctx, srcdst, srcdst);
  170. }
  171. static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
  172. {
  173. const unsigned int bsize = SERPENT_BLOCK_SIZE;
  174. struct crypt_priv *ctx = priv;
  175. int i;
  176. ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
  177. if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
  178. serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
  179. return;
  180. }
  181. for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
  182. __serpent_decrypt(ctx->ctx, srcdst, srcdst);
  183. }
/* LRW transform context: tweak multiplication table plus the cipher key. */
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};
  188. static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
  189. unsigned int keylen)
  190. {
  191. struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
  192. int err;
  193. err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
  194. SERPENT_BLOCK_SIZE);
  195. if (err)
  196. return err;
  197. return lrw_init_table(&ctx->lrw_table, key + keylen -
  198. SERPENT_BLOCK_SIZE);
  199. }
  200. static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  201. struct scatterlist *src, unsigned int nbytes)
  202. {
  203. struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
  204. be128 buf[SERPENT_PARALLEL_BLOCKS];
  205. struct crypt_priv crypt_ctx = {
  206. .ctx = &ctx->serpent_ctx,
  207. .fpu_enabled = false,
  208. };
  209. struct lrw_crypt_req req = {
  210. .tbuf = buf,
  211. .tbuflen = sizeof(buf),
  212. .table_ctx = &ctx->lrw_table,
  213. .crypt_ctx = &crypt_ctx,
  214. .crypt_fn = encrypt_callback,
  215. };
  216. int ret;
  217. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  218. ret = lrw_crypt(desc, dst, src, nbytes, &req);
  219. serpent_fpu_end(crypt_ctx.fpu_enabled);
  220. return ret;
  221. }
  222. static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  223. struct scatterlist *src, unsigned int nbytes)
  224. {
  225. struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
  226. be128 buf[SERPENT_PARALLEL_BLOCKS];
  227. struct crypt_priv crypt_ctx = {
  228. .ctx = &ctx->serpent_ctx,
  229. .fpu_enabled = false,
  230. };
  231. struct lrw_crypt_req req = {
  232. .tbuf = buf,
  233. .tbuflen = sizeof(buf),
  234. .table_ctx = &ctx->lrw_table,
  235. .crypt_ctx = &crypt_ctx,
  236. .crypt_fn = decrypt_callback,
  237. };
  238. int ret;
  239. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  240. ret = lrw_crypt(desc, dst, src, nbytes, &req);
  241. serpent_fpu_end(crypt_ctx.fpu_enabled);
  242. return ret;
  243. }
/* Transform teardown: release the LRW tweak multiplication table. */
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
/* XTS transform context: one key schedule for tweaks, one for data. */
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};
  253. static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
  254. unsigned int keylen)
  255. {
  256. struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
  257. u32 *flags = &tfm->crt_flags;
  258. int err;
  259. /* key consists of keys of equal size concatenated, therefore
  260. * the length must be even
  261. */
  262. if (keylen % 2) {
  263. *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  264. return -EINVAL;
  265. }
  266. /* first half of xts-key is for crypt */
  267. err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
  268. if (err)
  269. return err;
  270. /* second half of xts-key is for tweak */
  271. return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
  272. }
  273. static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  274. struct scatterlist *src, unsigned int nbytes)
  275. {
  276. struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
  277. be128 buf[SERPENT_PARALLEL_BLOCKS];
  278. struct crypt_priv crypt_ctx = {
  279. .ctx = &ctx->crypt_ctx,
  280. .fpu_enabled = false,
  281. };
  282. struct xts_crypt_req req = {
  283. .tbuf = buf,
  284. .tbuflen = sizeof(buf),
  285. .tweak_ctx = &ctx->tweak_ctx,
  286. .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
  287. .crypt_ctx = &crypt_ctx,
  288. .crypt_fn = encrypt_callback,
  289. };
  290. int ret;
  291. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  292. ret = xts_crypt(desc, dst, src, nbytes, &req);
  293. serpent_fpu_end(crypt_ctx.fpu_enabled);
  294. return ret;
  295. }
  296. static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  297. struct scatterlist *src, unsigned int nbytes)
  298. {
  299. struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
  300. be128 buf[SERPENT_PARALLEL_BLOCKS];
  301. struct crypt_priv crypt_ctx = {
  302. .ctx = &ctx->crypt_ctx,
  303. .fpu_enabled = false,
  304. };
  305. struct xts_crypt_req req = {
  306. .tbuf = buf,
  307. .tbuflen = sizeof(buf),
  308. .tweak_ctx = &ctx->tweak_ctx,
  309. .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
  310. .crypt_ctx = &crypt_ctx,
  311. .crypt_fn = decrypt_callback,
  312. };
  313. int ret;
  314. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  315. ret = xts_crypt(desc, dst, src, nbytes, &req);
  316. serpent_fpu_end(crypt_ctx.fpu_enabled);
  317. return ret;
  318. }
/*
 * Ten algorithm registrations.  Entries 0-4 (names prefixed with "__",
 * priority 0) are the raw synchronous AVX implementations; they may only
 * run where the FPU is usable, so they are not meant to be selected
 * directly.  Entries 5-9 are the public async algorithms that wrap them
 * through cryptd via the ablk_* helpers.
 */
static struct crypto_alg serpent_algs[10] = { {
	/* [0] internal synchronous ECB */
	.cra_name = "__ecb-serpent-avx",
	.cra_driver_name = "__driver-ecb-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	/* [1] internal synchronous CBC */
	.cra_name = "__cbc-serpent-avx",
	.cra_driver_name = "__driver-cbc-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	/* [2] internal synchronous CTR — stream mode, so blocksize is 1 */
	.cra_name = "__ctr-serpent-avx",
	.cra_driver_name = "__driver-ctr-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	/* [3] internal synchronous LRW — key carries one extra tweak block */
	.cra_name = "__lrw-serpent-avx",
	.cra_driver_name = "__driver-lrw-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit = lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = lrw_serpent_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	/* [4] internal synchronous XTS — key is two concatenated keys */
	.cra_name = "__xts-serpent-avx",
	.cra_driver_name = "__driver-xts-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = xts_serpent_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	/* [5] public async ecb(serpent), routed through cryptd */
	.cra_name = "ecb(serpent)",
	.cra_driver_name = "ecb-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [6] public async cbc(serpent) */
	.cra_name = "cbc(serpent)",
	.cra_driver_name = "cbc-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			/* CBC encryption is serial: run it synchronously */
			.encrypt = __ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [7] public async ctr(serpent) */
	.cra_name = "ctr(serpent)",
	.cra_driver_name = "ctr-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			/* CTR decryption == encryption (XOR keystream) */
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	/* [8] public async lrw(serpent) */
	.cra_name = "lrw(serpent)",
	.cra_driver_name = "lrw-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	/* [9] public async xts(serpent) */
	.cra_name = "xts(serpent)",
	.cra_driver_name = "xts-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
  543. static int __init serpent_init(void)
  544. {
  545. u64 xcr0;
  546. if (!cpu_has_avx || !cpu_has_osxsave) {
  547. printk(KERN_INFO "AVX instructions are not detected.\n");
  548. return -ENODEV;
  549. }
  550. xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  551. if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
  552. printk(KERN_INFO "AVX detected but unusable.\n");
  553. return -ENODEV;
  554. }
  555. return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
  556. }
/* Module teardown: drop every algorithm registered in serpent_init(). */
static void __exit serpent_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
/* Module plumbing: entry/exit hooks, description, license, alias. */
module_init(serpent_init);
module_exit(serpent_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");