nx-aes-gcm.c

/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
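
/*
 * Program both coprocessor parameter blocks with the user's AES key:
 * the GCM CPB drives the encrypt/decrypt operations, while the GCA CPB
 * drives the standalone AAD hash performed in nx_gca() below.
 */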
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8 *in_key,
                              unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}
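
/*
 * RFC 4106 keys carry a 4-byte salt appended to the AES key proper.
 * Strip it off, program the cipher key as usual, and stash the salt
 * for IV construction at request time.
 */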
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8 *in_key,
                                  unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}

static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        if (authsize > crypto_aead_alg(tfm)->maxauthsize)
                return -EINVAL;

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}
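
/* RFC 4106 only defines 8-, 12- and 16-byte ICVs. */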
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}
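
/*
 * Hash the associated data into 'out' using the GCA function control.
 * AAD of one block or less is copied straight through for the GCM
 * operation to consume; anything longer is walked through the
 * coprocessor in sg-list-sized chunks, chaining the partial pattern
 * between calls. (The negative inlen follows this driver's convention
 * of flagging scatter/gather parameters to the hypervisor.)
 */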
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                  struct aead_request *req,
                  u8 *out)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->assoc);
                scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
                                          req->assoc, processed, to_process);
                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                       csbcpb_aead->cpb.aes_gca.out_pat,
                       AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}
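
/*
 * GMAC path: authenticate associated data when the payload is empty.
 * The CPB is temporarily switched into GMAC mode and restored to GCM
 * mode on the way out.
 */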
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
                                          req->assoc, processed, to_process);
                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                   * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}
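
/*
 * Empty message, empty AAD: per the workbook note below, the tag for a
 * zero-length GCM operation reduces to encrypting the counter/IV block
 * itself, done here as a single ECB encryption.
 */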
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
               sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
                                 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
                                  nx_ctx->ap->sglen);
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
               crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

        /*
         * The ECB key occupies the same CPB region as the GCM AAD and
         * counter fields, so it is safe to simply zero it.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}
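
/*
 * Core GCM routine: hash any associated data via nx_gca(), then walk
 * the payload through the coprocessor in chunks, chaining the partial
 * MAC, counter and S0 values between h-calls. On decrypt, the computed
 * tag is compared against the tag appended to the source.
 */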
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        u32 max_sg_len;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        desc.info = nx_ctx->priv.gcm.iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

        if (nbytes == 0) {
                if (req->assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
                        rc = gmac(req, &desc);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
        if (req->assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, to_process, processed,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
                                 req->dst, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(itag, req->src, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_FROM_SG);
                rc = memcmp(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
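
/* gcm(aes) entry points: the caller supplies the 96-bit nonce in req->iv;
 * the low 32 bits of the counter block are set in gcm_aes_nx_crypt(). */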
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 0);
}
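
/* rfc4106(gcm(aes)) entry points: the 96-bit nonce is the 4-byte salt
 * saved at setkey time followed by the 8-byte per-request IV. */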
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 0);
}

/* Tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use
 * walk->blocksize, but instead uses the tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
        .cra_name        = "gcm(aes)",
        .cra_driver_name = "gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_aead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead        = {
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = gcm_aes_nx_set_key,
                .setauthsize = gcm_aes_nx_setauthsize,
                .encrypt     = gcm_aes_nx_encrypt,
                .decrypt     = gcm_aes_nx_decrypt,
        }
};

struct crypto_alg nx_gcm4106_aes_alg = {
        .cra_name        = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_nivaead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead        = {
                .ivsize      = 8,
                .maxauthsize = AES_BLOCK_SIZE,
                .geniv       = "seqiv",
                .setkey      = gcm4106_aes_nx_set_key,
                .setauthsize = gcm4106_aes_nx_setauthsize,
                .encrypt     = gcm4106_aes_nx_encrypt,
                .decrypt     = gcm4106_aes_nx_decrypt,
        }
};
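
/*
 * A minimal usage sketch (illustration only, not part of this driver):
 * how a kernel consumer might exercise the "gcm(aes)" transform
 * registered above through the generic AEAD API of this kernel
 * generation. The helper name 'demo_gcm_seal' and its buffer layout
 * are hypothetical; error paths are trimmed to the essentials.
 */
#if 0
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Encrypt 'len' bytes of 'buf' in place and append a 16-byte tag;
 * 'buf' is assumed to have len + 16 bytes of room and 'iv' holds the
 * 16-byte IV (this driver reads the first 12 bytes as the nonce). */
static int demo_gcm_seal(const u8 *key, unsigned int keylen,
                         u8 *buf, unsigned int len, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg;
        int rc;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_aead_setkey(tfm, key, keylen);
        if (rc)
                goto out_tfm;
        rc = crypto_aead_setauthsize(tfm, 16);
        if (rc)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        sg_init_one(&sg, buf, len + 16);
        aead_request_set_assoc(req, NULL, 0);   /* no associated data */
        aead_request_set_crypt(req, &sg, &sg, len, iv);

        /* The NX driver completes synchronously (nx_hcall_sync), so no
         * completion callback is needed for this sketch. */
        rc = crypto_aead_encrypt(req);

        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return rc;
}
#endif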