ahash.c

/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "internal.h"

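/*
 * Private state hung off an ahash_request while the core redirects the
 * request into an aligned bounce buffer: the caller's original completion
 * callback, callback data and result pointer are stashed here so they can
 * be restored when the operation finishes.
 */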
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

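/*
 * Map the current page of the walk and return the number of bytes the
 * caller may hash from walk->data.  If the starting offset is unaligned,
 * only the sub-block up to the next alignment boundary is handed out.
 */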
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = crypto_kmap(walk->pg, 0);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		/* Clamp to entrylen so the subtraction below cannot
		 * underflow for a small final chunk. */
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

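/*
 * Report completion of one step of the walk.  @err is the result of
 * hashing the previous chunk.  Returns a positive byte count if more
 * data is available at walk->data, zero when the walk is complete, or
 * a negative error code.
 */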
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		/* ALIGN() already rounds up to the next boundary; adding
		 * alignmask - 1 first would over-advance and skip data. */
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	crypto_kunmap(walk->data, 0);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

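/*
 * Start walking the data of an ahash request.  A minimal sketch of the
 * typical driver loop, where hash_block() stands in for the driver's
 * own block-hashing step:
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes, err = 0;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = hash_block(walk.data, nbytes);
 */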
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

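/*
 * Handle a key that does not satisfy the transform's alignment mask by
 * copying it into a freshly allocated, suitably aligned buffer before
 * calling the real setkey.  The buffer is zeroed on free since it held
 * key material.
 */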
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

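/*
 * The three helpers below implement hashing into an unaligned result
 * buffer: the digest is first written to the aligned ubuf inside the
 * private state and copied back to the caller's buffer on completion.
 * For an asynchronous transform the copy happens in the completion
 * callback instead (-EINPROGRESS leaves the state alive).
 */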
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_op_unaligned_finish(areq, err);

	complete(data, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

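/*
 * Exported entry points.  Each simply dispatches the corresponding
 * per-transform hook through crypto_ahash_op(), which transparently
 * handles an unaligned result buffer.
 */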
int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

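/*
 * Default finup implementation for algorithms that only provide update()
 * and final(): run update first, then chain into final.  The two-stage
 * completion callbacks below keep the caller's callback semantics intact
 * when either stage completes asynchronously.
 */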
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

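/*
 * Wire up the per-transform hooks when a tfm is instantiated.  Algorithms
 * that are not of crypto_ahash_type are synchronous shash algorithms and
 * get wrapped via crypto_init_shash_ops_async(); native ahash algorithms
 * have their hooks copied from the algorithm, with safe defaults standing
 * in for optional setkey/export/import.
 */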
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

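/*
 * Allocate an ahash transform by algorithm name.  A minimal usage
 * sketch:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */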
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

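/*
 * Common validation and setup shared by algorithm and template-instance
 * registration: reject digest or export state sizes above PAGE_SIZE / 8
 * and stamp the algorithm with the ahash type and flags.
 */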
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

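/*
 * Template helpers: bind a spawn to an underlying hash algorithm and
 * look up a hash algorithm referenced by an rtattr, as used by templates
 * built on top of ahash algorithms.
 */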
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");