blkcipher.c

/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
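
/*
 * Worked example for blkcipher_get_spot() (added commentary, not in the
 * original file): with 4 KB pages, start = 0xff0 and len = 0x20, the last
 * byte would land at 0x100f, so end_page = 0x1000 and the spot is bumped
 * to the start of the following page.  If [start, start + len) already
 * fits in one page, end_page <= start and start is returned unchanged.
 */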

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
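
/*
 * Completion contract (added commentary): cipher implementations call
 * blkcipher_walk_done() after processing each chunk, passing the number of
 * bytes of the chunk they left unprocessed (>= 0) or a negative errno.  The
 * walk is advanced, and while data remains the next chunk is mapped via
 * blkcipher_walk_next(); once everything is consumed, the (possibly
 * bounced) IV is copied back and any temporary buffers are freed.
 */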
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
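
/*
 * Buffer layout note for blkcipher_copy_iv() (added commentary): the
 * allocation below is sized for two aligned block-sized scratch slots
 * followed by the IV, with blkcipher_get_spot() applied to each piece so
 * that none of them straddles a page boundary.  The same scratch area can
 * later be reused by blkcipher_next_slow() via walk->buffer.
 */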
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
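
/*
 * A minimal usage sketch (added illustration, not part of the original
 * file): a typical synchronous mode implementation, in the style of
 * crypto/cbc.c, drives the walker as below.  The function name
 * example_encrypt and the per-block processing step are hypothetical;
 * blkcipher_walk_init(), blkcipher_walk_virt() and blkcipher_walk_done()
 * are the real entry points.
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes)) {
 *			// encrypt walk.src.virt.addr into walk.dst.virt.addr
 *			// one block at a time, leaving nbytes % bsize over
 *			err = blkcipher_walk_done(desc, &walk,
 *						  nbytes & (bsize - 1));
 *		}
 *
 *		return err;
 *	}
 */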
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
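
/*
 * Alignment note (added commentary): the setkey_unaligned() helper below
 * handles keys that do not satisfy the algorithm's alignmask.  For example,
 * with an alignmask of 15 and a key at an address ending in 0x04, the key
 * is copied into a freshly allocated buffer rounded up to the next 16-byte
 * boundary, the real ->setkey() is run on the aligned copy, and the copy
 * is zeroed before being freed.
 */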
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
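
/*
 * Worked example for crypto_blkcipher_ctxsize() (added commentary): a
 * caller asking for a synchronous tfm passes type with CRYPTO_ALG_ASYNC
 * clear and mask with CRYPTO_ALG_ASYNC set.  After type ^= CRYPTO_ALG_ASYNC,
 * (type & mask) is non-zero, so room for the IV is reserved behind the
 * aligned context; crypto_init_blkcipher_ops_sync() later points crt->iv
 * at exactly that spot.
 */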
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
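
/*
 * End-to-end usage sketch (added illustration, assuming the external
 * blkcipher API from <linux/crypto.h> of this era; "cbc(aes)" is just an
 * example algorithm name, and key/iv/sg_src/sg_dst/nbytes are caller
 * supplied):
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	// type 0 with CRYPTO_ALG_ASYNC in the mask requests a synchronous
 *	// tfm, which is wired up by crypto_init_blkcipher_ops_sync() above
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_encrypt(&desc, sg_dst, sg_src, nbytes);
 *
 *	crypto_free_blkcipher(tfm);
 */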