  1. /*
  2. * Cryptographic API.
  3. *
  4. * Cipher operations.
  5. *
  6. * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
  7. * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the Free
  11. * Software Foundation; either version 2 of the License, or (at your option)
  12. * any later version.
  13. *
  14. */
  15. #include <linux/compiler.h>
  16. #include <linux/kernel.h>
  17. #include <linux/crypto.h>
  18. #include <linux/errno.h>
  19. #include <linux/mm.h>
  20. #include <linux/slab.h>
  21. #include <linux/string.h>
  22. #include <asm/scatterlist.h>
  23. #include "internal.h"
  24. #include "scatterwalk.h"
/*
 * Per-operation descriptor handed down through crypt().
 * Bundles the transform with the raw single-block cipher function and
 * the mode-specific block processor so one generic walker serves
 * ECB and CBC, encrypt and decrypt.
 */
struct cipher_desc {
	struct crypto_tfm *tfm;
	/* raw single-block encrypt or decrypt (cia_encrypt/cia_decrypt) */
	void (*crfn)(void *ctx, u8 *dst, const u8 *src);
	/* mode processor (ecb_process, cbc_process_*); consumes whole
	 * blocks and returns the number of bytes handled */
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	/* mode-private data; the IV buffer for CBC, unused for ECB */
	void *info;
};
  32. static inline void xor_64(u8 *a, const u8 *b)
  33. {
  34. ((u32 *)a)[0] ^= ((u32 *)b)[0];
  35. ((u32 *)a)[1] ^= ((u32 *)b)[1];
  36. }
  37. static inline void xor_128(u8 *a, const u8 *b)
  38. {
  39. ((u32 *)a)[0] ^= ((u32 *)b)[0];
  40. ((u32 *)a)[1] ^= ((u32 *)b)[1];
  41. ((u32 *)a)[2] ^= ((u32 *)b)[2];
  42. ((u32 *)a)[3] ^= ((u32 *)b)[3];
  43. }
  44. static unsigned int crypt_slow(const struct cipher_desc *desc,
  45. struct scatter_walk *in,
  46. struct scatter_walk *out, unsigned int bsize)
  47. {
  48. u8 src[bsize];
  49. u8 dst[bsize];
  50. unsigned int n;
  51. n = scatterwalk_copychunks(src, in, bsize, 0);
  52. scatterwalk_advance(in, n);
  53. desc->prfn(desc, dst, src, bsize);
  54. n = scatterwalk_copychunks(dst, out, bsize, 1);
  55. scatterwalk_advance(out, n);
  56. return bsize;
  57. }
  58. static inline unsigned int crypt_fast(const struct cipher_desc *desc,
  59. struct scatter_walk *in,
  60. struct scatter_walk *out,
  61. unsigned int nbytes)
  62. {
  63. u8 *src, *dst;
  64. src = in->data;
  65. dst = scatterwalk_samebuf(in, out) ? src : out->data;
  66. nbytes = desc->prfn(desc, dst, src, nbytes);
  67. scatterwalk_advance(in, nbytes);
  68. scatterwalk_advance(out, nbytes);
  69. return nbytes;
  70. }
  71. /*
  72. * Generic encrypt/decrypt wrapper for ciphers, handles operations across
  73. * multiple page boundaries by using temporary blocks. In user context,
  74. * the kernel is given a chance to schedule us once per page.
  75. */
/*
 * Walk src and dst scatterlists and feed the mode processor
 * (desc->prfn) page-sized contiguous runs where possible, falling
 * back to a bounce buffer for blocks that cross a page boundary.
 *
 * Returns 0 on success, -EINVAL (with CRYPTO_TFM_RES_BAD_BLOCK_LEN
 * set in crt_flags) if nbytes is not a multiple of the block size.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);

	if (!nbytes)
		return 0;

	/* Block ciphers only operate on whole blocks. */
	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for(;;) {
		unsigned int n;

		/* Map the current pages of both walks. */
		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		/* n = bytes available contiguously in BOTH walks. */
		n = scatterwalk_clamp(&walk_in, nbytes);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n);
		else
			/* Block straddles a page; bounce it via stack. */
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		/* Unmap, flush and (if nbytes == 0) finish the walks. */
		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			return 0;

		/* Give the scheduler a chance once per iteration. */
		crypto_yield(tfm);
	}
}
  110. static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
  111. u8 *dst, const u8 *src,
  112. unsigned int nbytes)
  113. {
  114. struct crypto_tfm *tfm = desc->tfm;
  115. void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
  116. int bsize = crypto_tfm_alg_blocksize(tfm);
  117. void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
  118. u8 *iv = desc->info;
  119. unsigned int done = 0;
  120. do {
  121. xor(iv, src);
  122. fn(crypto_tfm_ctx(tfm), dst, iv);
  123. memcpy(iv, dst, bsize);
  124. src += bsize;
  125. dst += bsize;
  126. } while ((done += bsize) < nbytes);
  127. return done;
  128. }
/*
 * CBC decryption over a contiguous run of whole blocks:
 * P_i = D(C_i) xor C_{i-1}, with desc->info holding the running IV.
 * Returns the number of bytes processed.
 */
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	/* In-place (src == dst): decrypt into a stack bounce buffer,
	 * because the ciphertext block must survive until it has been
	 * copied into the IV below.  VLA is sized 0 when not needed. */
	u8 stack[src == dst ? bsize : 0];
	u8 *buf = stack;
	u8 **dst_p = src == dst ? &buf : &dst;
	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	do {
		u8 *tmp_dst = *dst_p;

		fn(crypto_tfm_ctx(tfm), tmp_dst, src);
		xor(tmp_dst, iv);
		/* Current ciphertext becomes the next block's IV. */
		memcpy(iv, src, bsize);
		/* In-place path: copy the bounce buffer to the output. */
		if (tmp_dst != dst)
			memcpy(dst, tmp_dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) < nbytes);

	return done;
}
  154. static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
  155. const u8 *src, unsigned int nbytes)
  156. {
  157. struct crypto_tfm *tfm = desc->tfm;
  158. int bsize = crypto_tfm_alg_blocksize(tfm);
  159. void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
  160. unsigned int done = 0;
  161. do {
  162. fn(crypto_tfm_ctx(tfm), dst, src);
  163. src += bsize;
  164. dst += bsize;
  165. } while ((done += bsize) < nbytes);
  166. return done;
  167. }
  168. static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
  169. {
  170. struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
  171. if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
  172. tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  173. return -EINVAL;
  174. } else
  175. return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
  176. &tfm->crt_flags);
  177. }
  178. static int ecb_encrypt(struct crypto_tfm *tfm,
  179. struct scatterlist *dst,
  180. struct scatterlist *src, unsigned int nbytes)
  181. {
  182. struct cipher_desc desc;
  183. desc.tfm = tfm;
  184. desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
  185. desc.prfn = ecb_process;
  186. return crypt(&desc, dst, src, nbytes);
  187. }
  188. static int ecb_decrypt(struct crypto_tfm *tfm,
  189. struct scatterlist *dst,
  190. struct scatterlist *src,
  191. unsigned int nbytes)
  192. {
  193. struct cipher_desc desc;
  194. desc.tfm = tfm;
  195. desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
  196. desc.prfn = ecb_process;
  197. return crypt(&desc, dst, src, nbytes);
  198. }
  199. static int cbc_encrypt(struct crypto_tfm *tfm,
  200. struct scatterlist *dst,
  201. struct scatterlist *src,
  202. unsigned int nbytes)
  203. {
  204. struct cipher_desc desc;
  205. desc.tfm = tfm;
  206. desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
  207. desc.prfn = cbc_process_encrypt;
  208. desc.info = tfm->crt_cipher.cit_iv;
  209. return crypt(&desc, dst, src, nbytes);
  210. }
  211. static int cbc_encrypt_iv(struct crypto_tfm *tfm,
  212. struct scatterlist *dst,
  213. struct scatterlist *src,
  214. unsigned int nbytes, u8 *iv)
  215. {
  216. struct cipher_desc desc;
  217. desc.tfm = tfm;
  218. desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
  219. desc.prfn = cbc_process_encrypt;
  220. desc.info = iv;
  221. return crypt(&desc, dst, src, nbytes);
  222. }
  223. static int cbc_decrypt(struct crypto_tfm *tfm,
  224. struct scatterlist *dst,
  225. struct scatterlist *src,
  226. unsigned int nbytes)
  227. {
  228. struct cipher_desc desc;
  229. desc.tfm = tfm;
  230. desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
  231. desc.prfn = cbc_process_decrypt;
  232. desc.info = tfm->crt_cipher.cit_iv;
  233. return crypt(&desc, dst, src, nbytes);
  234. }
  235. static int cbc_decrypt_iv(struct crypto_tfm *tfm,
  236. struct scatterlist *dst,
  237. struct scatterlist *src,
  238. unsigned int nbytes, u8 *iv)
  239. {
  240. struct cipher_desc desc;
  241. desc.tfm = tfm;
  242. desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
  243. desc.prfn = cbc_process_decrypt;
  244. desc.info = iv;
  245. return crypt(&desc, dst, src, nbytes);
  246. }
  247. static int nocrypt(struct crypto_tfm *tfm,
  248. struct scatterlist *dst,
  249. struct scatterlist *src,
  250. unsigned int nbytes)
  251. {
  252. return -ENOSYS;
  253. }
  254. static int nocrypt_iv(struct crypto_tfm *tfm,
  255. struct scatterlist *dst,
  256. struct scatterlist *src,
  257. unsigned int nbytes, u8 *iv)
  258. {
  259. return -ENOSYS;
  260. }
  261. int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
  262. {
  263. u32 mode = flags & CRYPTO_TFM_MODE_MASK;
  264. tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
  265. if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
  266. tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
  267. return 0;
  268. }
  269. int crypto_init_cipher_ops(struct crypto_tfm *tfm)
  270. {
  271. int ret = 0;
  272. struct cipher_tfm *ops = &tfm->crt_cipher;
  273. ops->cit_setkey = setkey;
  274. switch (tfm->crt_cipher.cit_mode) {
  275. case CRYPTO_TFM_MODE_ECB:
  276. ops->cit_encrypt = ecb_encrypt;
  277. ops->cit_decrypt = ecb_decrypt;
  278. break;
  279. case CRYPTO_TFM_MODE_CBC:
  280. ops->cit_encrypt = cbc_encrypt;
  281. ops->cit_decrypt = cbc_decrypt;
  282. ops->cit_encrypt_iv = cbc_encrypt_iv;
  283. ops->cit_decrypt_iv = cbc_decrypt_iv;
  284. break;
  285. case CRYPTO_TFM_MODE_CFB:
  286. ops->cit_encrypt = nocrypt;
  287. ops->cit_decrypt = nocrypt;
  288. ops->cit_encrypt_iv = nocrypt_iv;
  289. ops->cit_decrypt_iv = nocrypt_iv;
  290. break;
  291. case CRYPTO_TFM_MODE_CTR:
  292. ops->cit_encrypt = nocrypt;
  293. ops->cit_decrypt = nocrypt;
  294. ops->cit_encrypt_iv = nocrypt_iv;
  295. ops->cit_decrypt_iv = nocrypt_iv;
  296. break;
  297. default:
  298. BUG();
  299. }
  300. if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
  301. switch (crypto_tfm_alg_blocksize(tfm)) {
  302. case 8:
  303. ops->cit_xor_block = xor_64;
  304. break;
  305. case 16:
  306. ops->cit_xor_block = xor_128;
  307. break;
  308. default:
  309. printk(KERN_WARNING "%s: block size %u not supported\n",
  310. crypto_tfm_alg_name(tfm),
  311. crypto_tfm_alg_blocksize(tfm));
  312. ret = -EINVAL;
  313. goto out;
  314. }
  315. ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
  316. ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
  317. if (ops->cit_iv == NULL)
  318. ret = -ENOMEM;
  319. }
  320. out:
  321. return ret;
  322. }
  323. void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
  324. {
  325. kfree(tfm->crt_cipher.cit_iv);
  326. }