cast5_avx_glue.c

/*
 * Glue Code for the AVX assembler implementation of the Cast5 Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>

#define CAST5_PARALLEL_BLOCKS 16
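
/*
 * The accompanying AVX assembler routines process 16 blocks per call; the
 * final "xor" argument of __cast5_enc_blk_16way() selects whether the result
 * is stored to dst or XORed into it (the latter is used for CTR mode).
 * The *_xway() wrappers below hide the batch width from the mode code.
 */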
asmlinkage void __cast5_enc_blk_16way(struct cast5_ctx *ctx, u8 *dst,
				      const u8 *src, bool xor);
asmlinkage void cast5_dec_blk_16way(struct cast5_ctx *ctx, u8 *dst,
				    const u8 *src);

static inline void cast5_enc_blk_xway(struct cast5_ctx *ctx, u8 *dst,
				      const u8 *src)
{
	__cast5_enc_blk_16way(ctx, dst, src, false);
}

static inline void cast5_enc_blk_xway_xor(struct cast5_ctx *ctx, u8 *dst,
					  const u8 *src)
{
	__cast5_enc_blk_16way(ctx, dst, src, true);
}

static inline void cast5_dec_blk_xway(struct cast5_ctx *ctx, u8 *dst,
				      const u8 *src)
{
	cast5_dec_blk_16way(ctx, dst, src);
}
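
/*
 * FPU/AVX state handling: glue_fpu_begin() only enables the FPU once a walk
 * step contains at least CAST5_PARALLEL_BLOCKS blocks, and the enabled state
 * is carried across subsequent steps; cast5_fpu_end() releases it when the
 * request is finished.
 */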
static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void cast5_fpu_end(bool fpu_enabled)
{
	return glue_fpu_end(fpu_enabled);
}
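
/*
 * ECB: encrypt/decrypt 16-block batches with the AVX implementation and fall
 * back to the generic C functions for any remaining blocks.
 */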
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = CAST5_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
			do {
				if (enc)
					cast5_enc_blk_xway(ctx, wdst, wsrc);
				else
					cast5_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * CAST5_PARALLEL_BLOCKS;
				wdst += bsize * CAST5_PARALLEL_BLOCKS;
				nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				__cast5_encrypt(ctx, wdst, wsrc);
			else
				__cast5_decrypt(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	cast5_fpu_end(fpu_enabled);
	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, false);
}
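
/*
 * CBC encryption is inherently sequential (each block is chained to the
 * previous ciphertext block), so it is done one block at a time with the
 * generic C implementation and never uses the AVX path.
 */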
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = CAST5_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 *iv = (u64 *)walk->iv;

	do {
		*dst = *src ^ *iv;
		__cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u64 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
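
/*
 * CBC decryption can be parallelised: the walk segment is processed from its
 * last block backwards, 16 ciphertext blocks are decrypted at once, and each
 * result is then XORed with the preceding ciphertext block. The preceding
 * blocks are saved in ivs[] first because an in-place (src == dst) decryption
 * would otherwise have overwritten them.
 */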
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = CAST5_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 ivs[CAST5_PARALLEL_BLOCKS - 1];
	u64 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process multi-block batch */
	if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (CAST5_PARALLEL_BLOCKS - 1);
			src -= CAST5_PARALLEL_BLOCKS - 1;
			dst -= CAST5_PARALLEL_BLOCKS - 1;

			for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			cast5_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
				*(dst + (i + 1)) ^= *(ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			*dst ^= *(src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		__cast5_decrypt(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		*dst ^= *(src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	*dst ^= *(u64 *)walk->iv;
	*(u64 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	cast5_fpu_end(fpu_enabled);
	return err;
}
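
/*
 * CTR tail handling: encrypt the counter block to produce one block of
 * keystream and XOR just the remaining partial block of data with it.
 */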
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[CAST5_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__cast5_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
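
/*
 * Bulk CTR path: build up to 16 big-endian counter blocks, then use the
 * XOR variant of the 16-way encryption so the generated keystream is XORed
 * directly into the plaintext that was already copied to dst.
 */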
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = CAST5_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
	__be64 ctrblocks[CAST5_PARALLEL_BLOCKS];
	int i;

	/* Process multi-block batch */
	if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < CAST5_PARALLEL_BLOCKS; i++) {
				if (dst != src)
					dst[i] = src[i];

				ctrblocks[i] = cpu_to_be64(ctrblk++);
			}

			cast5_enc_blk_xway_xor(ctx, (u8 *)dst,
					       (u8 *)ctrblocks);

			src += CAST5_PARALLEL_BLOCKS;
			dst += CAST5_PARALLEL_BLOCKS;
			nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		ctrblocks[0] = cpu_to_be64(ctrblk++);

		__cast5_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		*dst ^= ctrblocks[0];

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
	return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	cast5_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
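
/*
 * Six registrations: the "__"-prefixed blkcipher entries (priority 0) are
 * the synchronous internal implementations; the ecb/cbc/ctr(cast5)
 * ablkcipher entries wrap them through the ablk_helper/cryptd machinery so
 * that requests issued from contexts where the FPU is not usable can be
 * deferred to process context.
 */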
static struct crypto_alg cast5_algs[6] = { {
	.cra_name		= "__ecb-cast5-avx",
	.cra_driver_name	= "__driver-ecb-cast5-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST5_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast5_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.setkey		= cast5_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-cast5-avx",
	.cra_driver_name	= "__driver-cbc-cast5-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST5_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast5_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.setkey		= cast5_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-cast5-avx",
	.cra_driver_name	= "__driver-ctr-cast5-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct cast5_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.ivsize		= CAST5_BLOCK_SIZE,
			.setkey		= cast5_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ecb(cast5)",
	.cra_driver_name	= "ecb-cast5-avx",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CAST5_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(cast5)",
	.cra_driver_name	= "cbc-cast5-avx",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CAST5_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.ivsize		= CAST5_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(cast5)",
	.cra_driver_name	= "ctr-cast5-avx",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= CAST5_MIN_KEY_SIZE,
			.max_keysize	= CAST5_MAX_KEY_SIZE,
			.ivsize		= CAST5_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
} };
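
/*
 * Register the algorithms only if the CPU advertises AVX and OSXSAVE and the
 * OS has enabled saving of both SSE and YMM state in XCR0.
 */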
static int __init cast5_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		pr_info("AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
}

static void __exit cast5_exit(void)
{
	crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
}

module_init(cast5_init);
module_exit(cast5_exit);

MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("cast5");