nx-aes-ccm.c

/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}
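
/*
 * Note on the rfc4309 key format: per RFC 4309, the key material handed to
 * setkey carries a 3-byte nonce (salt) appended to the AES key proper, e.g.
 * a 19-byte blob for AES-128 = 16 key bytes || 3 salt bytes.  The handler
 * below splits the salt off into the context and delegates the remaining
 * bytes to the plain CCM setkey above.
 */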
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}
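
/*
 * Note on valid tag lengths: NIST SP 800-38C permits CCM tag lengths of
 * 4, 6, 8, 10, 12, 14 or 16 bytes, while RFC 4309 narrows that to 8, 12
 * or 16 for ESP.  The two setauthsize handlers below enforce those
 * respective sets.
 */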
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
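
/*
 * Worked example for set_msg_len(): with msglen = 0x012345 and csize = 3,
 * cpu_to_be32(msglen) is the byte sequence 00 01 23 45, and the final
 * three bytes 01 23 45 land in the length field.  Since generate_b0()
 * passes b0 + 16 - l as the block pointer, the last csize bytes of the
 * 16-byte B0 block end up holding the message length, big-endian.
 */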

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}
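
/*
 * Worked example for generate_b0(): with iv[0] = 1 (so L = 2), an 8-byte
 * tag (m = 8) and AAD present, the flags octet becomes
 * 0x01 | (8 * ((8 - 2) / 2)) | 0x40 = 0x01 | 0x18 | 0x40 = 0x59,
 * matching the B0 flags layout in RFC 3610: L' in bits 0-2, M' = (m-2)/2
 * in bits 3-5, and the Adata bit at bit 6.
 */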

static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf notes:
	 * RFC 3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * The AAD is therefore further constrained by this and never
	 * exceeds 2^32 - 1 bytes.
	 */
	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do one CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}
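
	/*
	 * The stores above follow the RFC 3610 AAD length encoding: lengths
	 * in [1, 2^16 - 2^8) are a two-byte big-endian value, while larger
	 * lengths (up to 2^32 - 1) use the marker 0xfffe followed by a
	 * four-byte length.  E.g. assoclen = 8 yields
	 * B1 = 00 08 || first 8 AAD bytes || zero padding to 16 bytes.
	 * The raw u16/u32 stores rely on this driver running on big-endian
	 * Power hardware.
	 */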

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
					    nx_ctx->ap->sglen);

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
	} else {
		u32 max_sg_len;
		unsigned int processed = 0, to_process;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u32,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				   nx_ctx->ap->sglen);

		processed += iauth_len;

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);
			to_process = min_t(u64, to_process,
					   NX_PAGE_SIZE * (max_sg_len - 1));

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    to_process);

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
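
/*
 * Both bulk paths below stream the payload through the NX unit in chunks
 * bounded by ap->databytelen and by what one page of nx_sg entries can
 * map.  Every chunk except the last is flagged NX_FDM_INTERMEDIATE, and
 * each follow-on h-call is flagged NX_FDM_CONTINUATION after the IV/CTR,
 * partial authentication tag and S0 are carried over from the previous
 * operation's output fields.
 */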
static int ccm_nx_decrypt(struct aead_request *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
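
/*
 * The rfc4309 wrappers below build the counter block as
 * flags(1) || salt(3) || per-request IV(8) || counter(4): iv[0] = 3
 * encodes L' = 3, i.e. a 4-byte block counter and an 11-byte nonce made
 * of the 3-byte salt saved at setkey time plus the 8-byte request IV.
 */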
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	}
};