mv_cesa.c
/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @complete: called when the current request has been fully processed
 * @process: starts processing of the next chunk of the request
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current crypt process
 * @hw_nbytes: total bytes to process in hw for this request
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * The sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};
struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};
enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};
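/*
 * Derive the AES decryption key (the last round keys of the expanded
 * schedule) from the encryption key; mv_process_current_q() loads it into
 * the SRAM for CFG_DIR_DEC operations. The result is cached in the tfm
 * context until mv_setkey_aes() installs a new key.
 */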
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}
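/*
 * Copy up to @len bytes from the source scatterlist into @dbuf (the SRAM
 * input buffer), walking the sg mapping iterator across entries and
 * remembering the offset within the current entry for the next chunk.
 */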
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;

			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}
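/*
 * Fill the SRAM input buffer with the next chunk of the current request,
 * limited to what fits into the SRAM (max_req_size).
 */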
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;

	p->crypt_len =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
	    p->crypt_len);
}
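/*
 * Program the accelerator for one chunk: build the sec_accel_config
 * descriptor (cipher mode, direction, key length), place key, IV and input
 * data into the SRAM and start the operation. @first_block is nonzero for
 * the first chunk of a request; only then is the caller's IV copied in for
 * CBC.
 */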
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
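/*
 * Request-level completion: stop the sg iterators and, for CBC, copy the
 * updated IV from the SRAM back into req->info.
 */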
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
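/*
 * Copy the processed chunk from the SRAM output buffer back into the
 * destination scatterlist. Start the next chunk if the request is not
 * finished yet, otherwise run the completion handler and signal the request
 * as done (with bottom halves disabled, since completion callbacks normally
 * run in softirq context).
 */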
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	int need_copy_len = cpg->p.crypt_len;
	int sram_offset = 0;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

		memcpy(buf,
		       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
		       dst_copy);
		sram_offset += dst_copy;
		cpg->p.sg_dst_left -= dst_copy;
		need_copy_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (need_copy_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
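/* Number of scatterlist entries needed to cover total_bytes. */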
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}
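/*
 * Start processing a new request: reset the progress state, set up the
 * source and destination sg iterators and kick off the first chunk.
 */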
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}
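/*
 * Worker thread: drains the finished chunk when the engine is in
 * ENGINE_W_DEQUEUE, pulls the next request off the crypto queue when the
 * engine is idle and sleeps until the interrupt handler or an enqueue wakes
 * it up again.
 */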
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
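/*
 * Put a request on the crypto queue and wake the worker thread. Returns
 * the status reported by crypto_enqueue_request(), normally -EINPROGRESS.
 */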
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);

	return ret;
}
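/*
 * ablkcipher entry points: note the requested operation in the per-request
 * context and hand the request to the worker thread.
 */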
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
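/*
 * Interrupt handler: acknowledge the "accelerator 0 done" interrupt and
 * let the worker thread dequeue the finished chunk.
 */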
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}
struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};
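/*
 * Usage sketch (illustrative only, not part of this driver): consumers
 * reach this engine through the generic ablkcipher API, e.g.:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, my_priv);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * my_complete/my_priv, src_sg/dst_sg, key, nbytes and iv above are
 * placeholders for the caller's own callback, data and buffers.
 */

/*
 * Map the register and SRAM resources, start the worker thread, install
 * the interrupt handler and register the two AES algorithms.
 */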
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	/* unwind in reverse order of setup */
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
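/*
 * Tear down in reverse order; the SRAM is wiped before unmapping so no key
 * material is left behind.
 */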
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}
static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");