nx.c

/**
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
                  struct vio_pfo_op *op,
                  u32 may_sleep)
{
        int rc, retries = 10;
        struct vio_dev *viodev = nx_driver.viodev;

        atomic_inc(&(nx_ctx->stats->sync_ops));

        do {
                rc = vio_h_cop_sync(viodev, op);
        } while (rc == -EBUSY && !may_sleep && retries--);

        if (rc) {
                dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
                        "hcall rc: %ld\n", rc, op->hcall_err);
                atomic_inc(&(nx_ctx->stats->errors));
                atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
                atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
        }

        return rc;
}

/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until @sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a
 * 4K boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
                               u8 *start_addr,
                               unsigned int len,
                               u32 sgmax)
{
        unsigned int sg_len = 0;
        struct nx_sg *sg;
        u64 sg_addr = (u64)start_addr;
        u64 end_addr;

        /* determine the start and end for this address range - slightly
         * different if this is in VMALLOC_REGION */
        if (is_vmalloc_addr(start_addr))
                sg_addr = page_to_phys(vmalloc_to_page(start_addr))
                          + offset_in_page(sg_addr);
        else
                sg_addr = __pa(sg_addr);

        end_addr = sg_addr + len;

        /* each iteration will write one struct nx_sg element and add the
         * length of data described by that element to sg_len. Once @len bytes
         * have been described (or @sgmax elements have been written), the
         * loop ends. min_t is used to ensure @end_addr falls on the same page
         * as sg_addr, if not, we need to create another nx_sg element for the
         * data on the next page.
         *
         * Also when using vmalloc'ed data, every time that a system page
         * boundary is crossed the physical address needs to be re-calculated.
         */
        for (sg = sg_head; sg_len < len; sg++) {
                u64 next_page;

                sg->addr = sg_addr;
                sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
                                end_addr);

                next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
                sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
                sg_len += sg->len;

                if (sg_addr >= next_page &&
                    is_vmalloc_addr(start_addr + sg_len)) {
                        sg_addr = page_to_phys(vmalloc_to_page(
                                                start_addr + sg_len));
                        end_addr = sg_addr + len - sg_len;
                }

                if ((sg - sg_head) == sgmax) {
                        pr_err("nx: scatter/gather list overflow, pid: %d\n",
                               current->pid);
                        return NULL;
                }
        }

        /* return the moved sg_head pointer */
        return sg;
}

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
                                unsigned int sglen,
                                struct scatterlist *sg_src,
                                unsigned int start,
                                unsigned int src_len)
{
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_dst;
        unsigned int n, offset = 0, len = src_len;
        char *dst;

        /* we need to fast forward through @start bytes first */
        for (;;) {
                scatterwalk_start(&walk, sg_src);

                if (start < offset + sg_src->length)
                        break;

                offset += sg_src->length;
                sg_src = scatterwalk_sg_next(sg_src);
        }

        /* start - offset is the number of bytes to advance in the scatterlist
         * element we're currently looking at */
        scatterwalk_advance(&walk, start - offset);

        while (len && nx_sg) {
                n = scatterwalk_clamp(&walk, len);
                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                dst = scatterwalk_map(&walk);

                nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
                len -= n;

                scatterwalk_unmap(dst);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
        }

        /* return the moved destination pointer */
        return nx_sg;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
                      struct blkcipher_desc *desc,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes,
                      unsigned int offset,
                      u8 *iv)
{
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;

        if (iv)
                memcpy(iv, desc->info, AES_BLOCK_SIZE);

        nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
                                    offset, nbytes);
        nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
                                     offset, nbytes);

        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
        nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);

        return 0;
}

/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
        spin_lock_init(&nx_ctx->lock);
        memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
        nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

        nx_ctx->op.flags = function;
        nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
        nx_ctx->op.in = __pa(nx_ctx->in_sg);
        nx_ctx->op.out = __pa(nx_ctx->out_sg);

        if (nx_ctx->csbcpb_aead) {
                nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

                nx_ctx->op_aead.flags = function;
                nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
                nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
                nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
        }
}
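
/* parse the device tree "status" property; the driver only moves toward
 * NX_WAITING when the property reads "okay" */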
static void nx_of_update_status(struct device *dev,
                                struct property *p,
                                struct nx_of *props)
{
        if (!strncmp(p->value, "okay", p->length)) {
                props->status = NX_WAITING;
                props->flags |= NX_OF_FLAG_STATUS_SET;
        } else {
                dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
                         (char *)p->value);
        }
}
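
/* parse the device tree "ibm,max-sg-len" property: a single u32 giving the
 * maximum scatter/gather list length supported by the hardware */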
static void nx_of_update_sglen(struct device *dev,
                               struct property *p,
                               struct nx_of *props)
{
        if (p->length != sizeof(props->max_sg_len)) {
                dev_err(dev, "%s: unexpected format for "
                        "ibm,max-sg-len property\n", __func__);
                dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
                        "long, expected %zd bytes\n", __func__,
                        p->length, sizeof(props->max_sg_len));
                return;
        }

        props->max_sg_len = *(u32 *)p->value;
        props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}
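
/* parse the device tree "ibm,max-sync-cop" property: a sequence of
 * max_sync_cop headers, each followed by msc->triplets msc_triplet entries
 * giving per-key-size data length and scatter/gather limits for one
 * function code/mode pair */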
static void nx_of_update_msc(struct device *dev,
                             struct property *p,
                             struct nx_of *props)
{
        struct msc_triplet *trip;
        struct max_sync_cop *msc;
        unsigned int bytes_so_far, i, lenp;

        msc = (struct max_sync_cop *)p->value;
        lenp = p->length;

        /* You can't tell if the data read in for this property is sane by its
         * size alone. This is because there are sizes embedded in the data
         * structure. The best we can do is check lengths as we parse and bail
         * as soon as a length error is detected. */
        bytes_so_far = 0;

        while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
                bytes_so_far += sizeof(struct max_sync_cop);

                trip = msc->trip;

                for (i = 0;
                     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
                     i < msc->triplets;
                     i++) {
                        if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
                                dev_err(dev, "unknown function code/mode "
                                        "combo: %d/%d (ignored)\n", msc->fc,
                                        msc->mode);
                                goto next_loop;
                        }
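
                        /* props->ap[fc][mode][] is indexed by key size:
                         * slot 0 holds the 128/160-bit limits, slot 1 the
                         * 192-bit limits, slot 2 the 256-bit limits (for
                         * SHA/HMAC, 256-bit data lands in slot 1 and
                         * 512-bit in slot 2) */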
                        switch (trip->keybitlen) {
                        case 128:
                        case 160:
                                props->ap[msc->fc][msc->mode][0].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][0].sglen =
                                        trip->sglen;
                                break;
                        case 192:
                                props->ap[msc->fc][msc->mode][1].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][1].sglen =
                                        trip->sglen;
                                break;
                        case 256:
                                if (msc->fc == NX_FC_AES) {
                                        props->ap[msc->fc][msc->mode][2].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][2].sglen =
                                                trip->sglen;
                                } else if (msc->fc == NX_FC_AES_HMAC ||
                                           msc->fc == NX_FC_SHA) {
                                        props->ap[msc->fc][msc->mode][1].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][1].sglen =
                                                trip->sglen;
                                } else {
                                        dev_warn(dev, "unknown function "
                                                 "code/key bit len combo"
                                                 ": (%u/256)\n", msc->fc);
                                }
                                break;
                        case 512:
                                props->ap[msc->fc][msc->mode][2].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][2].sglen =
                                        trip->sglen;
                                break;
                        default:
                                dev_warn(dev, "unknown function code/key bit "
                                         "len combo: (%u/%u)\n", msc->fc,
                                         trip->keybitlen);
                                break;
                        }
next_loop:
                        bytes_so_far += sizeof(struct msc_triplet);
                        trip++;
                }

                msc = (struct max_sync_cop *)trip;
        }

        props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
        struct device_node *base_node = dev->of_node;
        struct property *p;

        p = of_find_property(base_node, "status", NULL);
        if (!p)
                dev_info(dev, "%s: property 'status' not found\n", __func__);
        else
                nx_of_update_status(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sg-len", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
                         __func__);
        else
                nx_of_update_sglen(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
                         __func__);
        else
                nx_of_update_msc(dev, p, props);
}
/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
        int rc = -1;

        if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
                goto out;

        memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

        rc = NX_DEBUGFS_INIT(&nx_driver);
        if (rc)
                goto out;

        nx_driver.of.status = NX_OKAY;

        rc = crypto_register_alg(&nx_ecb_aes_alg);
        if (rc)
                goto out;

        rc = crypto_register_alg(&nx_cbc_aes_alg);
        if (rc)
                goto out_unreg_ecb;

        rc = crypto_register_alg(&nx_ctr_aes_alg);
        if (rc)
                goto out_unreg_cbc;

        rc = crypto_register_alg(&nx_ctr3686_aes_alg);
        if (rc)
                goto out_unreg_ctr;

        rc = crypto_register_alg(&nx_gcm_aes_alg);
        if (rc)
                goto out_unreg_ctr3686;

        rc = crypto_register_alg(&nx_gcm4106_aes_alg);
        if (rc)
                goto out_unreg_gcm;

        rc = crypto_register_alg(&nx_ccm_aes_alg);
        if (rc)
                goto out_unreg_gcm4106;

        rc = crypto_register_alg(&nx_ccm4309_aes_alg);
        if (rc)
                goto out_unreg_ccm;

        rc = crypto_register_shash(&nx_shash_sha256_alg);
        if (rc)
                goto out_unreg_ccm4309;

        rc = crypto_register_shash(&nx_shash_sha512_alg);
        if (rc)
                goto out_unreg_s256;

        rc = crypto_register_shash(&nx_shash_aes_xcbc_alg);
        if (rc)
                goto out_unreg_s512;

        goto out;

out_unreg_s512:
        crypto_unregister_shash(&nx_shash_sha512_alg);
out_unreg_s256:
        crypto_unregister_shash(&nx_shash_sha256_alg);
out_unreg_ccm4309:
        crypto_unregister_alg(&nx_ccm4309_aes_alg);
out_unreg_ccm:
        crypto_unregister_alg(&nx_ccm_aes_alg);
out_unreg_gcm4106:
        crypto_unregister_alg(&nx_gcm4106_aes_alg);
out_unreg_gcm:
        crypto_unregister_alg(&nx_gcm_aes_alg);
out_unreg_ctr3686:
        crypto_unregister_alg(&nx_ctr3686_aes_alg);
out_unreg_ctr:
        crypto_unregister_alg(&nx_ctr_aes_alg);
out_unreg_cbc:
        crypto_unregister_alg(&nx_cbc_aes_alg);
out_unreg_ecb:
        crypto_unregister_alg(&nx_ecb_aes_alg);
out:
        return rc;
}
/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
        if (nx_driver.of.status != NX_OKAY) {
                pr_err("Attempt to initialize NX crypto context while device "
                       "is not available!\n");
                return -ENODEV;
        }

        /* we need an extra page for csbcpb_aead for these modes */
        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);
        else
                nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);

        nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
        if (!nx_ctx->kmem)
                return -ENOMEM;

        /* the csbcpb and scatterlists must be 4K aligned pages */
        nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
                                                       (u64)NX_PAGE_SIZE));
        nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
        nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->csbcpb_aead =
                        (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
                                             NX_PAGE_SIZE);

        /* give each context a pointer to global stats and their OF
         * properties */
        nx_ctx->stats = &nx_driver.stats;
        memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
               sizeof(struct alg_props) * 3);

        return 0;
}
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_XCBC_MAC);
}
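
/* each of the functions above is installed as the .cra_init hook by the
 * corresponding algorithm definition (in the nx-aes-*.c and nx-sha*.c
 * files alongside this one) */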
/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with them.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

        kzfree(nx_ctx->kmem);
        nx_ctx->csbcpb = NULL;
        nx_ctx->csbcpb_aead = NULL;
        nx_ctx->in_sg = NULL;
        nx_ctx->out_sg = NULL;
}
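
/* bind to the vio device and read its device tree properties; only one
 * instance of the NX hardware may register, and the crypto algorithms are
 * registered here via nx_register_algs() */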
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
        dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
                viodev->name, viodev->resource_id);

        if (nx_driver.viodev) {
                dev_err(&viodev->dev, "%s: Attempt to register more than one "
                        "instance of the hardware\n", __func__);
                return -EINVAL;
        }

        nx_driver.viodev = viodev;

        nx_of_init(&viodev->dev, &nx_driver.of);

        return nx_register_algs();
}
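
/* undo nx_probe: remove the debugfs files and unregister every algorithm
 * nx_register_algs() registered, but only if the device came up NX_OKAY */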
static int nx_remove(struct vio_dev *viodev)
{
        dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
                viodev->unit_address);

        if (nx_driver.of.status == NX_OKAY) {
                NX_DEBUGFS_FINI(&nx_driver);

                crypto_unregister_alg(&nx_ccm_aes_alg);
                crypto_unregister_alg(&nx_ccm4309_aes_alg);
                crypto_unregister_alg(&nx_gcm_aes_alg);
                crypto_unregister_alg(&nx_gcm4106_aes_alg);
                crypto_unregister_alg(&nx_ctr_aes_alg);
                crypto_unregister_alg(&nx_ctr3686_aes_alg);
                crypto_unregister_alg(&nx_cbc_aes_alg);
                crypto_unregister_alg(&nx_ecb_aes_alg);
                crypto_unregister_shash(&nx_shash_sha256_alg);
                crypto_unregister_shash(&nx_shash_sha512_alg);
                crypto_unregister_shash(&nx_shash_aes_xcbc_alg);
        }

        return 0;
}

/* module wide initialization/cleanup */
static int __init nx_init(void)
{
        return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
        vio_unregister_driver(&nx_driver.viodriver);
}
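
/* vio device ids this driver matches against */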
static struct vio_device_id nx_crypto_driver_ids[] = {
        { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
        .viodriver = {
                .id_table = nx_crypto_driver_ids,
                .probe = nx_probe,
                .remove = nx_remove,
                .name  = NX_NAME,
        },
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);