/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
        ENGINE_IDLE,
        ENGINE_BUSY,
        ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current crypt process
 * @hw_nbytes: total bytes to process in hw for this request
 * @copy_back: whether to copy data back (crypt) or not (hash)
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
        void (*complete) (void);
        void (*process) (int is_first);

        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
        int hw_nbytes;
        /* dst mostly */
        int copy_back;
        int sg_dst_left;
        int dst_start;
        int hw_processed_bytes;
};
struct crypto_priv {
        void __iomem *reg;
        void __iomem *sram;
        int irq;
        struct task_struct *queue_th;

        /* the lock protects queue and eng_st */
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
        struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
        u8 aes_enc_key[AES_KEY_LEN];
        u32 aes_dec_key[8];
        int key_len;
        u32 need_calc_aes_dkey;
};

enum crypto_op {
        COP_AES_ECB,
        COP_AES_CBC,
};

struct mv_req_ctx {
        enum crypto_op op;
        int decrypt;
};
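
/*
 * Derive the decryption key expected by the engine: expand the AES
 * encryption key and cache the tail of the key schedule in
 * ctx->aes_dec_key.  Only recomputed after a new key has been set.
 */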
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
        struct crypto_aes_ctx gen_aes_key;
        int key_pos;

        if (!ctx->need_calc_aes_dkey)
                return;

        crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

        key_pos = ctx->key_len + 24;
        memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
        switch (ctx->key_len) {
        case AES_KEYSIZE_256:
                key_pos -= 2;
                /* fall through */
        case AES_KEYSIZE_192:
                key_pos -= 2;
                memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
                                4 * 4);
                break;
        }
        ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
                unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

        switch (len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->key_len = len;
        ctx->need_calc_aes_dkey = 1;

        /* copy only the supplied key bytes, not the full key buffer */
        memcpy(ctx->aes_enc_key, key, len);
        return 0;
}
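
/*
 * Copy @len bytes from the source scatterlist into @dbuf, advancing the
 * sg mapping iterator across scatterlist entries as needed.
 */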
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
        int ret;
        void *sbuf;
        int copied = 0;

        while (1) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
                        p->sg_src_left = p->src_sg_it.length;
                        p->src_start = 0;
                }

                sbuf = p->src_sg_it.addr + p->src_start;

                if (p->sg_src_left <= len - copied) {
                        memcpy(dbuf + copied, sbuf, p->sg_src_left);
                        copied += p->sg_src_left;
                        p->sg_src_left = 0;
                        if (copied >= len)
                                break;
                } else {
                        int copy_len = len - copied;

                        memcpy(dbuf + copied, sbuf, copy_len);
                        p->src_start += copy_len;
                        p->sg_src_left -= copy_len;
                        break;
                }
        }
}
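
/*
 * Load the next chunk of the current request into the SRAM input buffer.
 * The chunk size is capped by the space available in SRAM (max_req_size).
 */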
static void setup_data_in(void)
{
        struct req_progress *p = &cpg->p;
        p->crypt_len =
            min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
        copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
                        p->crypt_len);
}
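
/*
 * Program the security accelerator descriptor in SRAM for the current chunk
 * (config, key and, on the first block of a CBC request, the IV) and start
 * the operation.  Completion is signalled by the accelerator interrupt.
 */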
static void mv_process_current_q(int first_block)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;

        switch (req_ctx->op) {
        case COP_AES_ECB:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
        default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
                if (first_block)
                        memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
                break;
        }
        if (req_ctx->decrypt) {
                op.config |= CFG_DIR_DEC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
                                AES_KEY_LEN);
        } else {
                op.config |= CFG_DIR_ENC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
                                AES_KEY_LEN);
        }

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                op.config |= CFG_AES_LEN_128;
                break;
        case AES_KEYSIZE_192:
                op.config |= CFG_AES_LEN_192;
                break;
        case AES_KEYSIZE_256:
                op.config |= CFG_AES_LEN_256;
                break;
        }
        op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;

        setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));

        writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

        /*
         * XXX: add timer if the interrupt does not occur for some mystery
         * reason
         */
}
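
/*
 * Called once the whole request has been processed: stop the sg iterators
 * and, for CBC, copy the updated IV back into the request.
 */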
static void mv_crypto_algo_completion(void)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        sg_miter_stop(&cpg->p.src_sg_it);
        sg_miter_stop(&cpg->p.dst_sg_it);

        if (req_ctx->op != COP_AES_CBC)
                return;

        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
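
/*
 * Runs in the queue thread after the engine has finished a chunk: copy the
 * result from SRAM back into the destination scatterlist, then either kick
 * off the next chunk or complete the request.
 */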
static void dequeue_complete_req(void)
{
        struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;

        cpg->p.hw_processed_bytes += cpg->p.crypt_len;
        if (cpg->p.copy_back) {
                int need_copy_len = cpg->p.crypt_len;
                int sram_offset = 0;

                do {
                        int dst_copy;

                        if (!cpg->p.sg_dst_left) {
                                ret = sg_miter_next(&cpg->p.dst_sg_it);
                                BUG_ON(!ret);
                                cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                                cpg->p.dst_start = 0;
                        }

                        buf = cpg->p.dst_sg_it.addr;
                        buf += cpg->p.dst_start;

                        dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

                        memcpy(buf,
                               cpg->sram + SRAM_DATA_OUT_START + sram_offset,
                               dst_copy);
                        sram_offset += dst_copy;
                        cpg->p.sg_dst_left -= dst_copy;
                        need_copy_len -= dst_copy;
                        cpg->p.dst_start += dst_copy;
                } while (need_copy_len > 0);
        }

        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
        if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
                cpg->p.process(0);
        } else {
                cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->complete(req, 0);
                local_bh_enable();
        }
}
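
/* Number of scatterlist entries needed to cover @total_bytes. */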
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
        int i = 0;
        size_t cur_len;

        while (1) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
                        total_bytes -= cur_len;
                else
                        break;
        }

        return i;
}
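
/*
 * Set up req_progress for a new cipher request and start processing its
 * first chunk.
 */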
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
        struct req_progress *p = &cpg->p;
        int num_sgs;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        p->hw_nbytes = req->nbytes;
        p->complete = mv_crypto_algo_completion;
        p->process = mv_process_current_q;
        p->copy_back = 1;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}
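
/*
 * Engine worker thread: completes finished chunks and, while the engine is
 * idle, dequeues the next request from the crypto queue and submits it.
 */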
static int queue_manag(void *data)
{
        cpg->eng_st = ENGINE_IDLE;
        do {
                struct ablkcipher_request *req;
                struct crypto_async_request *async_req = NULL;
                /* initialise, otherwise backlog is read uninitialised when
                 * the engine is not idle */
                struct crypto_async_request *backlog = NULL;

                __set_current_state(TASK_INTERRUPTIBLE);

                if (cpg->eng_st == ENGINE_W_DEQUEUE)
                        dequeue_complete_req();

                spin_lock_irq(&cpg->lock);
                if (cpg->eng_st == ENGINE_IDLE) {
                        backlog = crypto_get_backlog(&cpg->queue);
                        async_req = crypto_dequeue_request(&cpg->queue);
                        if (async_req) {
                                BUG_ON(cpg->eng_st != ENGINE_IDLE);
                                cpg->eng_st = ENGINE_BUSY;
                        }
                }
                spin_unlock_irq(&cpg->lock);

                if (backlog) {
                        backlog->complete(backlog, -EINPROGRESS);
                        backlog = NULL;
                }

                if (async_req) {
                        req = container_of(async_req,
                                        struct ablkcipher_request, base);
                        mv_enqueue_new_req(req);
                        async_req = NULL;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}
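
/* Enqueue a request for the engine and wake the worker thread. */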
static int mv_handle_req(struct crypto_async_request *req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cpg->lock, flags);
        ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
}
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
        return 0;
}
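
/*
 * Interrupt handler: acknowledge the "accelerator 0 done" interrupt and hand
 * the finished chunk over to the queue thread for dequeueing.
 */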
static irqreturn_t crypto_int(int irq, void *priv)
{
        u32 val;

        val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
        if (!(val & SEC_INT_ACCEL0_DONE))
                return IRQ_NONE;

        val &= ~SEC_INT_ACCEL0_DONE;
        writel(val, cpg->reg + FPGA_INT_STATUS);
        writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
        BUG_ON(cpg->eng_st != ENGINE_BUSY);
        cpg->eng_st = ENGINE_W_DEQUEUE;
        wake_up_process(cpg->queue_th);
        return IRQ_HANDLED;
}
static struct crypto_alg mv_aes_alg_ecb = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "mv-ecb-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 16,
        .cra_ctxsize = sizeof(struct mv_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = mv_setkey_aes,
                        .encrypt = mv_enc_aes_ecb,
                        .decrypt = mv_dec_aes_ecb,
                },
        },
};

static struct crypto_alg mv_aes_alg_cbc = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "mv-cbc-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .ivsize = AES_BLOCK_SIZE,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = mv_setkey_aes,
                        .encrypt = mv_enc_aes_cbc,
                        .decrypt = mv_dec_aes_cbc,
                },
        },
};
static int mv_probe(struct platform_device *pdev)
{
        struct crypto_priv *cp;
        struct resource *res;
        int irq;
        int ret;

        if (cpg) {
                printk(KERN_ERR "Second crypto dev?\n");
                return -EEXIST;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!res)
                return -ENXIO;

        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        spin_lock_init(&cp->lock);
        crypto_init_queue(&cp->queue, 50);
        cp->reg = ioremap(res->start, res->end - res->start + 1);
        if (!cp->reg) {
                ret = -ENOMEM;
                goto err;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        if (!res) {
                ret = -ENXIO;
                goto err_unmap_reg;
        }
        cp->sram_size = res->end - res->start + 1;
        cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
        cp->sram = ioremap(res->start, cp->sram_size);
        if (!cp->sram) {
                ret = -ENOMEM;
                goto err_unmap_reg;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0 || irq == NO_IRQ) {
                ret = irq;
                goto err_unmap_sram;
        }
        cp->irq = irq;

        platform_set_drvdata(pdev, cp);
        cpg = cp;

        cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
        if (IS_ERR(cp->queue_th)) {
                ret = PTR_ERR(cp->queue_th);
                goto err_unmap_sram;
        }

        ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
                        cp);
        if (ret)
                goto err_thread;

        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

        ret = crypto_register_alg(&mv_aes_alg_ecb);
        if (ret)
                goto err_irq;

        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret)
                goto err_unreg_ecb;
        return 0;
        /* unwind in reverse order of setup */
err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
        free_irq(irq, cp);
err_thread:
        kthread_stop(cp->queue_th);
err_unmap_sram:
        iounmap(cp->sram);
err_unmap_reg:
        iounmap(cp->reg);
err:
        kfree(cp);
        cpg = NULL;
        platform_set_drvdata(pdev, NULL);
        return ret;
}
static int mv_remove(struct platform_device *pdev)
{
        struct crypto_priv *cp = platform_get_drvdata(pdev);

        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);
        iounmap(cp->sram);
        iounmap(cp->reg);
        kfree(cp);
        cpg = NULL;
        return 0;
}
static struct platform_driver marvell_crypto = {
        .probe = mv_probe,
        .remove = mv_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = "mv_crypto",
        },
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
        return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
        platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");