aesni-intel_glue.c

/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <asm/i387.h>
#include <asm/aes.h>
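
/*
 * Per-tfm context for the asynchronous "ecb(aes)"/"cbc(aes)" wrappers.
 * It only holds the cryptd transform used to defer the work to process
 * context when the FPU cannot be used directly.
 */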
struct async_aes_ctx {
        struct cryptd_ablkcipher *cryptd_tfm;
};

#define AESNI_ALIGN     16
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
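
/*
 * AES-NI instructions use the SSE/XMM register state.  If we are in
 * interrupt context and CR0.TS is clear, the interrupted task's FPU state
 * is live and must not be clobbered, so the callers fall back to the
 * software AES implementation in that case.
 */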
static inline int kernel_fpu_using(void)
{
        if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
                return 1;
        return 0;
}
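
/*
 * The context is over-allocated by AESNI_ALIGN - 1 bytes (see .cra_ctxsize
 * below), so the raw context pointer can be rounded up to the 16-byte
 * alignment the AES-NI instructions expect.  No rounding is needed when the
 * crypto API already guarantees at least that alignment.
 */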
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
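
/*
 * Key expansion: validate the key length, then expand the key with
 * aesni_set_key() inside a kernel_fpu_begin()/kernel_fpu_end() section,
 * or with the generic crypto_aes_expand_key() when the FPU is unavailable.
 */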
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (kernel_fpu_using())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (kernel_fpu_using())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (kernel_fpu_using())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}
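
/*
 * Synchronous single-block "aes" cipher backed directly by AES-NI, with a
 * fallback to the x86 assembly implementation when the FPU is in use.
 */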
static struct crypto_alg aesni_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(aesni_alg.cra_list),
        .cra_u = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
};
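
/*
 * ECB/CBC block cipher walks: each step processes only whole 16-byte blocks
 * (nbytes & AES_BLOCK_MASK) and hands the unprocessed remainder back to
 * blkcipher_walk_done(), which advances the walk to the next segment.
 */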
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static struct crypto_alg blk_ecb_alg = {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
};
static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static struct crypto_alg blk_cbc_alg = {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
};
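
/*
 * Asynchronous wrappers exported as "ecb(aes)" and "cbc(aes)".  When the
 * FPU is free, the inner blkcipher is called synchronously; when it is in
 * use (interrupt context), the request is copied into the request context
 * and handed to cryptd, which completes it later in process context.
 */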
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
{
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (kernel_fpu_using()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_ablkcipher_encrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;
                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;
                return crypto_blkcipher_crt(desc.tfm)->encrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (kernel_fpu_using()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_ablkcipher_decrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;
                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;
                return crypto_blkcipher_crt(desc.tfm)->decrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}
static void ablk_exit(struct crypto_tfm *tfm)
{
        struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
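
/*
 * The request size is enlarged to hold a nested ablkcipher_request for the
 * cryptd transform, so ablk_encrypt()/ablk_decrypt() can build the deferred
 * request inside the caller's request without allocating memory.
 */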
static void ablk_init_common(struct crypto_tfm *tfm,
                             struct cryptd_ablkcipher *cryptd_tfm)
{
        struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
                crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ecb_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_cbc_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};
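
/*
 * Module init: bail out if the CPU lacks AES-NI, then register the plain
 * cipher, the internal blkcipher helpers and the async wrappers, unwinding
 * the registrations in reverse order if any step fails.
 */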
static int __init aesni_init(void)
{
        int err;

        if (!cpu_has_aes) {
                printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
                return -ENODEV;
        }
        if ((err = crypto_register_alg(&aesni_alg)))
                goto aes_err;
        if ((err = crypto_register_alg(&blk_ecb_alg)))
                goto blk_ecb_err;
        if ((err = crypto_register_alg(&blk_cbc_alg)))
                goto blk_cbc_err;
        if ((err = crypto_register_alg(&ablk_ecb_alg)))
                goto ablk_ecb_err;
        if ((err = crypto_register_alg(&ablk_cbc_alg)))
                goto ablk_cbc_err;

        return err;

ablk_cbc_err:
        crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
        crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
        crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
        crypto_unregister_alg(&aesni_alg);
aes_err:
        return err;
}

static void __exit aesni_exit(void)
{
        crypto_unregister_alg(&ablk_cbc_alg);
        crypto_unregister_alg(&ablk_ecb_alg);
        crypto_unregister_alg(&blk_cbc_alg);
        crypto_unregister_alg(&blk_ecb_alg);
        crypto_unregister_alg(&aesni_alg);
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");