ccm.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882
  1. /*
  2. * CCM: Counter with CBC-MAC
  3. *
  4. * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the Free
  8. * Software Foundation; either version 2 of the License, or (at your option)
  9. * any later version.
  10. *
  11. */
  12. #include <crypto/internal/aead.h>
  13. #include <crypto/internal/skcipher.h>
  14. #include <crypto/scatterwalk.h>
  15. #include <linux/err.h>
  16. #include <linux/init.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/slab.h>
  20. #include "internal.h"
/*
 * Per-instance context for "ccm_base(ctr,cipher)" instances: spawns for
 * the two child algorithms the instance is built from.
 */
struct ccm_instance_ctx {
	struct crypto_skcipher_spawn ctr;	/* CTR-mode stream transform */
	struct crypto_spawn cipher;		/* raw block cipher for the CBC-MAC */
};
/* Per-tfm context: the two child transforms a CCM aead operates with. */
struct crypto_ccm_ctx {
	struct crypto_cipher *cipher;	/* single-block cipher, computes the MAC */
	struct crypto_ablkcipher *ctr;	/* CTR transform, does the en/decryption */
};
/* Per-tfm context for the rfc4309 wrapper around a ccm aead. */
struct crypto_rfc4309_ctx {
	struct crypto_aead *child;	/* underlying CCM transform */
	u8 nonce[3];			/* salt taken from the last 3 key bytes */
};
/*
 * Per-request scratch state, carved out of the aead request context
 * (crypto_ccm_reqctx() aligns it to the tfm's alignmask).
 */
struct crypto_ccm_req_priv_ctx {
	u8 odata[16];		/* running CBC-MAC value */
	u8 idata[16];		/* staging buffer for a partial MAC block */
	u8 auth_tag[16];	/* auth tag copied from the wire (decrypt) */
	u32 ilen;		/* bytes currently buffered in idata */
	u32 flags;		/* request flags, cached for crypto_yield() */
	struct scatterlist src[2];	/* chains the tag block ahead of req->src */
	struct scatterlist dst[2];	/* ditto for req->dst */
	struct ablkcipher_request abreq;	/* sub-request for the CTR pass */
};
  43. static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
  44. struct aead_request *req)
  45. {
  46. unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
  47. return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
  48. }
  49. static int set_msg_len(u8 *block, unsigned int msglen, int csize)
  50. {
  51. __be32 data;
  52. memset(block, 0, csize);
  53. block += csize;
  54. if (csize >= 4)
  55. csize = 4;
  56. else if (msglen > (1 << (8 * csize)))
  57. return -EOVERFLOW;
  58. data = cpu_to_be32(msglen);
  59. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  60. return 0;
  61. }
  62. static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
  63. unsigned int keylen)
  64. {
  65. struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
  66. struct crypto_ablkcipher *ctr = ctx->ctr;
  67. struct crypto_cipher *tfm = ctx->cipher;
  68. int err = 0;
  69. crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
  70. crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
  71. CRYPTO_TFM_REQ_MASK);
  72. err = crypto_ablkcipher_setkey(ctr, key, keylen);
  73. crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
  74. CRYPTO_TFM_RES_MASK);
  75. if (err)
  76. goto out;
  77. crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
  78. crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
  79. CRYPTO_TFM_REQ_MASK);
  80. err = crypto_cipher_setkey(tfm, key, keylen);
  81. crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
  82. CRYPTO_TFM_RES_MASK);
  83. out:
  84. return err;
  85. }
  86. static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
  87. unsigned int authsize)
  88. {
  89. switch (authsize) {
  90. case 4:
  91. case 6:
  92. case 8:
  93. case 10:
  94. case 12:
  95. case 14:
  96. case 16:
  97. break;
  98. default:
  99. return -EINVAL;
  100. }
  101. return 0;
  102. }
/*
 * Build the B0 block for the CBC-MAC from the IV: copy the 16-byte IV,
 * fold the encoded tag length and the Adata flag into the flags octet,
 * then store the message length into the trailing L bytes.
 * See RFC 3610 section 2.2 / NIST SP 800-38C A.2.1.
 */
static int format_input(u8 *info, struct aead_request *req,
			unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int lp = req->iv[0];	/* L' = L - 1 (validated by check_iv) */
	unsigned int l = lp + 1;	/* width of the length field, in bytes */
	unsigned int m;

	m = crypto_aead_authsize(aead);

	memcpy(info, req->iv, 16);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*info |= (8 * ((m - 2) / 2));	/* encode tag length into bits 3..5 */
	if (req->assoclen)
		*info |= 64;		/* Adata bit: associated data present */

	return set_msg_len(info + 16 - l, cryptlen, l);
}
  120. static int format_adata(u8 *adata, unsigned int a)
  121. {
  122. int len = 0;
  123. /* add control info for associated data
  124. * RFC 3610 and NIST Special Publication 800-38C
  125. */
  126. if (a < 65280) {
  127. *(__be16 *)adata = cpu_to_be16(a);
  128. len = 2;
  129. } else {
  130. *(__be16 *)adata = cpu_to_be16(0xfffe);
  131. *(__be32 *)&adata[2] = cpu_to_be32(a);
  132. len = 6;
  133. }
  134. return len;
  135. }
/*
 * Fold @n bytes of @data into the running CBC-MAC in pctx->odata.
 * Input need not be block aligned: a partial block is buffered in
 * pctx->idata (pctx->ilen bytes so far) and completed on a later call;
 * only whole 16-byte blocks are XORed in and encrypted.
 */
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
			struct crypto_ccm_req_priv_ctx *pctx)
{
	unsigned int bs = 16;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int datalen, getlen;

	datalen = n;

	/* first time in here, block may be partially filled. */
	getlen = bs - pctx->ilen;
	if (datalen >= getlen) {
		/* complete the buffered block and absorb it into the MAC */
		memcpy(idata + pctx->ilen, data, getlen);
		crypto_xor(odata, idata, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		datalen -= getlen;
		data += getlen;
		pctx->ilen = 0;
	}

	/* now encrypt rest of data */
	while (datalen >= bs) {
		crypto_xor(odata, data, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		datalen -= bs;
		data += bs;
	}

	/* check and see if there's leftover data that wasn't
	 * enough to fill a block.
	 */
	if (datalen) {
		memcpy(idata + pctx->ilen, data, datalen);
		pctx->ilen += datalen;
	}
}
/*
 * Walk @len bytes of scatterlist @sg and feed them to compute_mac().
 * Any partial block still buffered at the end is zero-padded and
 * encrypted into the MAC, as CCM requires for the final block of a
 * segment (associated data or payload).
 */
static void get_data_to_compute(struct crypto_cipher *tfm,
				struct crypto_ccm_req_priv_ctx *pctx,
				struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;
	u8 *data_src;
	int n;

	scatterwalk_start(&walk, sg);
	while (len) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* current entry exhausted; step to the next one */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		data_src = scatterwalk_map(&walk);

		compute_mac(tfm, data_src, n, pctx);
		len -= n;

		scatterwalk_unmap(data_src);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(pctx->flags);
	}

	/* any leftover needs padding and then encrypted */
	if (pctx->ilen) {
		int padlen;
		u8 *odata = pctx->odata;
		u8 *idata = pctx->idata;

		padlen = 16 - pctx->ilen;
		memset(idata + pctx->ilen, 0, padlen);
		crypto_xor(odata, idata, 16);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		pctx->ilen = 0;
	}
}
/*
 * Compute the CBC-MAC over B0 (built from the IV), the encoded
 * associated data, and the data in @plain.  The running MAC is left in
 * pctx->odata; the caller encrypts it with counter 0 to form the tag.
 *
 * Returns 0 or a negative error from format_input() (e.g. -EOVERFLOW
 * when @cryptlen does not fit the IV's length field).
 */
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
			   unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct crypto_cipher *cipher = ctx->cipher;
	unsigned int assoclen = req->assoclen;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int err;

	/* format control data for input */
	err = format_input(odata, req, cryptlen);
	if (err)
		goto out;

	/* encrypt first block to use as start in computing mac */
	crypto_cipher_encrypt_one(cipher, odata, odata);

	/* format associated data and compute into mac */
	if (assoclen) {
		pctx->ilen = format_adata(idata, assoclen);
		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
	} else {
		pctx->ilen = 0;
	}

	/* compute plaintext into mac */
	get_data_to_compute(cipher, pctx, plain, cryptlen);

out:
	return err;
}
/*
 * Completion callback for the async CTR pass of crypto_ccm_encrypt():
 * on success, copy the (now encrypted) auth tag from pctx->odata to the
 * end of the destination, then complete the parent aead request.
 */
static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	u8 *odata = pctx->odata;

	if (!err)
		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
					 crypto_aead_authsize(aead), 1);
	aead_request_complete(req, err);
}
  244. static inline int crypto_ccm_check_iv(const u8 *iv)
  245. {
  246. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  247. if (1 > iv[0] || iv[0] > 7)
  248. return -EINVAL;
  249. return 0;
  250. }
/*
 * CCM encryption: compute the CBC-MAC over the plaintext, then run one
 * CTR pass over the MAC block (counter 0) followed by the payload
 * (counter 1..), and append the encrypted tag to the destination.
 */
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* MAC over B0 + adata + plaintext; result lands in odata. */
	err = crypto_ccm_auth(req, req->src, cryptlen);
	if (err)
		return err;

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* Chain the MAC block in front of the payload so a single CTR
	 * pass encrypts both the tag and the data.
	 */
	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, odata, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, odata, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	/* async completion takes the crypto_ccm_encrypt_done() path */
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
				 crypto_aead_authsize(aead), 1);
	return err;
}
  295. static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
  296. int err)
  297. {
  298. struct aead_request *req = areq->data;
  299. struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
  300. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  301. unsigned int authsize = crypto_aead_authsize(aead);
  302. unsigned int cryptlen = req->cryptlen - authsize;
  303. if (!err) {
  304. err = crypto_ccm_auth(req, req->dst, cryptlen);
  305. if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
  306. err = -EBADMSG;
  307. }
  308. aead_request_complete(req, err);
  309. }
  310. static int crypto_ccm_decrypt(struct aead_request *req)
  311. {
  312. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  313. struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
  314. struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
  315. struct ablkcipher_request *abreq = &pctx->abreq;
  316. struct scatterlist *dst;
  317. unsigned int authsize = crypto_aead_authsize(aead);
  318. unsigned int cryptlen = req->cryptlen;
  319. u8 *authtag = pctx->auth_tag;
  320. u8 *odata = pctx->odata;
  321. u8 *iv = req->iv;
  322. int err;
  323. if (cryptlen < authsize)
  324. return -EINVAL;
  325. cryptlen -= authsize;
  326. err = crypto_ccm_check_iv(iv);
  327. if (err)
  328. return err;
  329. pctx->flags = aead_request_flags(req);
  330. scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
  331. memset(iv + 15 - iv[0], 0, iv[0] + 1);
  332. sg_init_table(pctx->src, 2);
  333. sg_set_buf(pctx->src, authtag, 16);
  334. scatterwalk_sg_chain(pctx->src, 2, req->src);
  335. dst = pctx->src;
  336. if (req->src != req->dst) {
  337. sg_init_table(pctx->dst, 2);
  338. sg_set_buf(pctx->dst, authtag, 16);
  339. scatterwalk_sg_chain(pctx->dst, 2, req->dst);
  340. dst = pctx->dst;
  341. }
  342. ablkcipher_request_set_tfm(abreq, ctx->ctr);
  343. ablkcipher_request_set_callback(abreq, pctx->flags,
  344. crypto_ccm_decrypt_done, req);
  345. ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
  346. err = crypto_ablkcipher_decrypt(abreq);
  347. if (err)
  348. return err;
  349. err = crypto_ccm_auth(req, req->dst, cryptlen);
  350. if (err)
  351. return err;
  352. /* verify */
  353. if (memcmp(authtag, odata, authsize))
  354. return -EBADMSG;
  355. return err;
  356. }
/*
 * tfm constructor: instantiate both child transforms from the instance
 * spawns and size the per-request context so the aligned private ctx
 * plus the child CTR request fit inside the aead request.
 */
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;
	struct crypto_ablkcipher *ctr;
	unsigned long align;
	int err;

	cipher = crypto_spawn_cipher(&ictx->cipher);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_cipher;

	ctx->cipher = cipher;
	ctx->ctr = ctr;

	/* the request ctx is already ctx-aligned; only count the excess */
	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = align +
				sizeof(struct crypto_ccm_req_priv_ctx) +
				crypto_ablkcipher_reqsize(ctr);

	return 0;

err_free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
  385. static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
  386. {
  387. struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
  388. crypto_free_cipher(ctx->cipher);
  389. crypto_free_ablkcipher(ctx->ctr);
  390. }
/*
 * Shared instance constructor for the "ccm" and "ccm_base" templates.
 * Looks up the block cipher, grabs the CTR skcipher spawn, validates
 * both children, and fills in the aead algorithm description.
 *
 * The module reference taken by crypto_alg_mod_lookup() on @cipher_name
 * is dropped on every exit path (the spawn holds its own reference).
 */
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
						       const char *full_name,
						       const char *ctr_name,
						       const char *cipher_name)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *ctr;
	struct crypto_alg *cipher;
	struct ccm_instance_ctx *ictx;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
				       CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(cipher))
		return ERR_CAST(cipher);

	/* CCM is defined only for 128-bit block ciphers */
	err = -EINVAL;
	if (cipher->cra_blocksize != 16)
		goto out_put_cipher;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	err = -ENOMEM;
	if (!inst)
		goto out_put_cipher;

	ictx = crypto_instance_ctx(inst);

	err = crypto_init_spawn(&ictx->cipher, cipher, inst,
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;

	crypto_set_skcipher_spawn(&ictx->ctr, inst);
	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err)
		goto err_drop_cipher;

	ctr = crypto_skcipher_spawn_alg(&ictx->ctr);

	/* Not a stream cipher? */
	err = -EINVAL;
	if (ctr->cra_blocksize != 1)
		goto err_drop_ctr;

	/* We want the real thing! */
	if (ctr->cra_ablkcipher.ivsize != 16)
		goto err_drop_ctr;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "ccm_base(%s,%s)", ctr->cra_driver_name,
		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_ctr;

	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);

	/* instance is async iff the CTR child is async */
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
				  (__alignof__(u32) - 1);
	inst->alg.cra_type = &crypto_aead_type;
	inst->alg.cra_aead.ivsize = 16;
	inst->alg.cra_aead.maxauthsize = 16;
	inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
	inst->alg.cra_init = crypto_ccm_init_tfm;
	inst->alg.cra_exit = crypto_ccm_exit_tfm;
	inst->alg.cra_aead.setkey = crypto_ccm_setkey;
	inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
	inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;

out:
	crypto_mod_put(cipher);
	return inst;

err_drop_ctr:
	crypto_drop_skcipher(&ictx->ctr);
err_drop_cipher:
	crypto_drop_spawn(&ictx->cipher);
err_free_inst:
	kfree(inst);
out_put_cipher:
	inst = ERR_PTR(err);
	goto out;
}
  472. static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
  473. {
  474. const char *cipher_name;
  475. char ctr_name[CRYPTO_MAX_ALG_NAME];
  476. char full_name[CRYPTO_MAX_ALG_NAME];
  477. cipher_name = crypto_attr_alg_name(tb[1]);
  478. if (IS_ERR(cipher_name))
  479. return ERR_CAST(cipher_name);
  480. if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
  481. cipher_name) >= CRYPTO_MAX_ALG_NAME)
  482. return ERR_PTR(-ENAMETOOLONG);
  483. if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
  484. CRYPTO_MAX_ALG_NAME)
  485. return ERR_PTR(-ENAMETOOLONG);
  486. return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
  487. }
  488. static void crypto_ccm_free(struct crypto_instance *inst)
  489. {
  490. struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
  491. crypto_drop_spawn(&ctx->cipher);
  492. crypto_drop_skcipher(&ctx->ctr);
  493. kfree(inst);
  494. }
/* "ccm(cipher)" template: convenience wrapper around ccm_base. */
static struct crypto_template crypto_ccm_tmpl = {
	.name = "ccm",
	.alloc = crypto_ccm_alloc,
	.free = crypto_ccm_free,
	.module = THIS_MODULE,
};
  501. static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
  502. {
  503. const char *ctr_name;
  504. const char *cipher_name;
  505. char full_name[CRYPTO_MAX_ALG_NAME];
  506. ctr_name = crypto_attr_alg_name(tb[1]);
  507. if (IS_ERR(ctr_name))
  508. return ERR_CAST(ctr_name);
  509. cipher_name = crypto_attr_alg_name(tb[2]);
  510. if (IS_ERR(cipher_name))
  511. return ERR_CAST(cipher_name);
  512. if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
  513. ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
  514. return ERR_PTR(-ENAMETOOLONG);
  515. return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
  516. }
/* "ccm_base(ctr,cipher)" template: the fully spelled-out form. */
static struct crypto_template crypto_ccm_base_tmpl = {
	.name = "ccm_base",
	.alloc = crypto_ccm_base_alloc,
	.free = crypto_ccm_free,
	.module = THIS_MODULE,
};
  523. static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
  524. unsigned int keylen)
  525. {
  526. struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
  527. struct crypto_aead *child = ctx->child;
  528. int err;
  529. if (keylen < 3)
  530. return -EINVAL;
  531. keylen -= 3;
  532. memcpy(ctx->nonce, key + keylen, 3);
  533. crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  534. crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
  535. CRYPTO_TFM_REQ_MASK);
  536. err = crypto_aead_setkey(child, key, keylen);
  537. crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
  538. CRYPTO_TFM_RES_MASK);
  539. return err;
  540. }
  541. static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
  542. unsigned int authsize)
  543. {
  544. struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
  545. switch (authsize) {
  546. case 8:
  547. case 12:
  548. case 16:
  549. break;
  550. default:
  551. return -EINVAL;
  552. }
  553. return crypto_aead_setauthsize(ctx->child, authsize);
  554. }
/*
 * Build the child CCM request for an rfc4309 operation.  The 16-byte
 * CCM IV is assembled as: L' = 3, the 3-byte key salt, the caller's
 * 8-byte IV, and 4 remaining counter bytes (filled by the CCM layer).
 */
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	/* IV lives after the child's request ctx, aligned for the child
	 * (see the reqsize calculation in crypto_rfc4309_init_tfm) */
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;
	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
	aead_request_set_assoc(subreq, req->assoc, req->assoclen);

	return subreq;
}
  574. static int crypto_rfc4309_encrypt(struct aead_request *req)
  575. {
  576. req = crypto_rfc4309_crypt(req);
  577. return crypto_aead_encrypt(req);
  578. }
  579. static int crypto_rfc4309_decrypt(struct aead_request *req)
  580. {
  581. req = crypto_rfc4309_crypt(req);
  582. return crypto_aead_decrypt(req);
  583. }
/*
 * tfm constructor for rfc4309: instantiate the child CCM aead and size
 * the request context for a child request plus an aligned 16-byte IV
 * (laid out as crypto_rfc4309_crypt() expects).
 */
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	unsigned long align;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	ctx->child = aead;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	/* subreq header + child ctx + padding + 16-byte IV */
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;
}
  603. static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
  604. {
  605. struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
  606. crypto_free_aead(ctx->child);
  607. }
/*
 * Instance constructor for "rfc4309(ccm(...))": wraps an existing CCM
 * aead, sanity-checks that it looks like CCM (16-byte IV, stream-style
 * blocksize), and exposes it with an 8-byte IV plus seqiv generation.
 */
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct crypto_alg *alg;
	const char *ccm_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	ccm_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ccm_name))
		return ERR_CAST(ccm_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);
	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_aead(spawn, ccm_name, 0,
			       crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	err = -EINVAL;

	/* We only support 16-byte blocks. */
	if (alg->cra_aead.ivsize != 16)
		goto out_drop_alg;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto out_drop_alg;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_nivaead_type;	/* IV is generated */
	inst->alg.cra_aead.ivsize = 8;	/* caller supplies only 8 IV bytes */
	inst->alg.cra_aead.maxauthsize = 16;
	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
	inst->alg.cra_init = crypto_rfc4309_init_tfm;
	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
	inst->alg.cra_aead.geniv = "seqiv";

out:
	return inst;

out_drop_alg:
	crypto_drop_aead(spawn);
out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
  673. static void crypto_rfc4309_free(struct crypto_instance *inst)
  674. {
  675. crypto_drop_spawn(crypto_instance_ctx(inst));
  676. kfree(inst);
  677. }
/* "rfc4309(ccm(...))" template: CCM with implicit nonce for ESP. */
static struct crypto_template crypto_rfc4309_tmpl = {
	.name = "rfc4309",
	.alloc = crypto_rfc4309_alloc,
	.free = crypto_rfc4309_free,
	.module = THIS_MODULE,
};
/*
 * Register the three templates, unwinding in reverse order on failure
 * so a partial registration never sticks around.
 */
static int __init crypto_ccm_module_init(void)
{
	int err;

	err = crypto_register_template(&crypto_ccm_base_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_ccm_tmpl);
	if (err)
		goto out_undo_base;

	err = crypto_register_template(&crypto_rfc4309_tmpl);
	if (err)
		goto out_undo_ccm;

out:
	return err;

out_undo_ccm:
	crypto_unregister_template(&crypto_ccm_tmpl);
out_undo_base:
	crypto_unregister_template(&crypto_ccm_base_tmpl);
	goto out;
}
/* Unregister the templates in the reverse order of registration. */
static void __exit crypto_ccm_module_exit(void)
{
	crypto_unregister_template(&crypto_rfc4309_tmpl);
	crypto_unregister_template(&crypto_ccm_tmpl);
	crypto_unregister_template(&crypto_ccm_base_tmpl);
}
module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
/* Allow "ccm_base"/"rfc4309" template requests to autoload this module. */
MODULE_ALIAS("ccm_base");
MODULE_ALIAS("rfc4309");