
/*
 * Cryptographic API.
 *
 * Support for DCP cryptographic accelerator.
 *
 * Copyright (c) 2013
 * Author: Tobias Rauter <tobias.rauter@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/miscdevice.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK */
#define DBS_IOCTL_BASE	'd'
#define DBS_ENC		_IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC		_IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
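
/*
 * A userspace sketch of the interface above, for illustration only (this
 * snippet is not part of the driver; it assumes the usual <fcntl.h> and
 * <sys/ioctl.h> declarations, the "/dev/dcpboot" node registered in
 * dcp_probe() below, and use() standing in for whatever consumes the
 * result):
 *
 *	uint8_t blk[16];		// one AES block, transformed in place
 *	int fd = open("/dev/dcpboot", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, DBS_ENC, blk) == 0)
 *		use(blk);		// blk now holds the OTP-key ciphertext
 *	close(fd);
 */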
/* DCP channel used for AES */
#define USED_CHANNEL	1
/* Ring Buffers' maximum size */
#define DCP_MAX_PKG	20

/* Control Register */
#define DCP_REG_CTRL			0x000
#define DCP_CTRL_SFRST			(1<<31)
#define DCP_CTRL_CLKGATE		(1<<30)
#define DCP_CTRL_CRYPTO_PRESENT		(1<<29)
#define DCP_CTRL_SHA_PRESENT		(1<<28)
#define DCP_CTRL_GATHER_RES_WRITE	(1<<23)
#define DCP_CTRL_ENABLE_CONTEXT_CACHE	(1<<22)
#define DCP_CTRL_ENABLE_CONTEXT_SWITCH	(1<<21)
#define DCP_CTRL_CH_IRQ_E_0		0x01
#define DCP_CTRL_CH_IRQ_E_1		0x02
#define DCP_CTRL_CH_IRQ_E_2		0x04
#define DCP_CTRL_CH_IRQ_E_3		0x08

/* Status register */
#define DCP_REG_STAT			0x010
#define DCP_STAT_OTP_KEY_READY		(1<<28)
#define DCP_STAT_CUR_CHANNEL(stat)	((stat>>24)&0x0F)
#define DCP_STAT_READY_CHANNEL(stat)	((stat>>16)&0x0F)
#define DCP_STAT_IRQ(stat)		(stat&0x0F)
#define DCP_STAT_CHAN_0			(0x01)
#define DCP_STAT_CHAN_1			(0x02)
#define DCP_STAT_CHAN_2			(0x04)
#define DCP_STAT_CHAN_3			(0x08)

/* Channel Control Register */
#define DCP_REG_CHAN_CTRL		0x020
#define DCP_CHAN_CTRL_CH0_IRQ_MERGED	(1<<16)
#define DCP_CHAN_CTRL_HIGH_PRIO_0	(0x0100)
#define DCP_CHAN_CTRL_HIGH_PRIO_1	(0x0200)
#define DCP_CHAN_CTRL_HIGH_PRIO_2	(0x0400)
#define DCP_CHAN_CTRL_HIGH_PRIO_3	(0x0800)
#define DCP_CHAN_CTRL_ENABLE_0		(0x01)
#define DCP_CHAN_CTRL_ENABLE_1		(0x02)
#define DCP_CHAN_CTRL_ENABLE_2		(0x04)
#define DCP_CHAN_CTRL_ENABLE_3		(0x08)
/*
 * Channel Registers:
 * The DCP has 4 channels. Each of these channels
 * has 4 registers (command pointer, semaphore, status and options).
 * The address of register REG of channel CHAN is obtained by
 * dcp_chan_reg(REG, CHAN)
 */
#define DCP_REG_CHAN_PTR	0x00000100
#define DCP_REG_CHAN_SEMA	0x00000110
#define DCP_REG_CHAN_STAT	0x00000120
#define DCP_REG_CHAN_OPT	0x00000130

#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0	0x010000
#define DCP_CHAN_STAT_NO_CHAIN		0x020000
#define DCP_CHAN_STAT_CONTEXT_ERROR	0x030000
#define DCP_CHAN_STAT_PAYLOAD_ERROR	0x040000
#define DCP_CHAN_STAT_INVALID_MODE	0x050000
#define DCP_CHAN_STAT_PAGEFAULT		0x40
#define DCP_CHAN_STAT_DST		0x20
#define DCP_CHAN_STAT_SRC		0x10
#define DCP_CHAN_STAT_PACKET		0x08
#define DCP_CHAN_STAT_SETUP		0x04
#define DCP_CHAN_STAT_MISMATCH		0x02

/* hw packet control */
#define DCP_PKT_PAYLOAD_KEY	(1<<11)
#define DCP_PKT_OTP_KEY		(1<<10)
#define DCP_PKT_CIPHER_INIT	(1<<9)
#define DCP_PKG_CIPHER_ENCRYPT	(1<<8)
#define DCP_PKT_CIPHER_ENABLE	(1<<5)
#define DCP_PKT_DECR_SEM	(1<<1)
#define DCP_PKT_CHAIN		(1<<2)
#define DCP_PKT_IRQ		1
#define DCP_PKT_MODE_CBC	(1<<4)
#define DCP_PKT_KEYSELECT_OTP	(0xFF<<8)

/* cipher flags */
#define DCP_ENC		0x0001
#define DCP_DEC		0x0002
#define DCP_ECB		0x0004
#define DCP_CBC		0x0008
#define DCP_CBC_INIT	0x0010
#define DCP_NEW_KEY	0x0040
#define DCP_OTP_KEY	0x0080
#define DCP_AES		0x1000

/* DCP Flags */
#define DCP_FLAG_BUSY		0x01
#define DCP_FLAG_PRODUCING	0x02

/* clock defines */
#define CLOCK_ON	1
#define CLOCK_OFF	0
struct dcp_dev_req_ctx {
	int mode;
};

struct dcp_op {
	unsigned int flags;
	u8 key[AES_KEYSIZE_128];
	int keylen;

	struct ablkcipher_request *req;
	struct crypto_ablkcipher *fallback;

	uint32_t stat;
	uint32_t pkt1;
	uint32_t pkt2;
	struct ablkcipher_walk walk;
};

struct dcp_dev {
	struct device *dev;
	void __iomem *dcp_regs_base;

	int dcp_vmi_irq;
	int dcp_irq;

	spinlock_t queue_lock;
	struct crypto_queue queue;

	uint32_t pkt_produced;
	uint32_t pkt_consumed;

	struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
	dma_addr_t hw_phys_pkg;

	/* [KEY][IV] Both with 16 Bytes */
	u8 *payload_base;
	dma_addr_t payload_base_dma;

	struct tasklet_struct done_task;
	struct tasklet_struct queue_task;
	struct timer_list watchdog;

	unsigned long flags;

	struct dcp_op *ctx;

	struct miscdevice dcp_bootstream_misc;
};

struct dcp_hw_packet {
	uint32_t next;
	uint32_t pkt1;
	uint32_t pkt2;
	uint32_t src;
	uint32_t dst;
	uint32_t size;
	uint32_t payload;
	uint32_t stat;
};
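
/*
 * dcp_probe() below allocates DCP_MAX_PKG of these packets in one coherent
 * DMA block and links them into a ring through their 'next' physical
 * addresses, so a multi-packet request can be chained without reprogramming
 * the channel between packets.
 */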
static struct dcp_dev *global_dev;

static inline u32 dcp_chan_reg(u32 reg, int chan)
{
	return reg + (chan) * 0x40;
}
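
/*
 * Worked example, derived from the offsets above: the semaphore register of
 * the AES channel is dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL)
 * = 0x110 + 1 * 0x40 = 0x150.
 */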
static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + reg);
}
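
/*
 * The three helpers below use the set/clear/toggle shadow registers that
 * i.MX-style register blocks provide at offsets +0x4, +0x8 and +0xC of
 * each register, avoiding a read-modify-write on the control registers.
 */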
static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x04));
}

static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x08));
}

static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x0C));
}

static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
{
	return readl(dev->dcp_regs_base + reg);
}
static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
{
	dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
	dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
	dev_dbg(dev->dev, "unmap packet %p", pkt);
}

static int dcp_dma_map(struct dcp_dev *dev,
	struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
{
	dev_dbg(dev->dev, "map packet %p", pkt);
	/* align to length = 16 */
	pkt->size = walk->nbytes - (walk->nbytes % 16);

	pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
		pkt->size, DMA_TO_DEVICE);
	if (pkt->src == 0) {
		dev_err(dev->dev, "Unable to map src");
		return -ENOMEM;
	}

	pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
		pkt->size, DMA_FROM_DEVICE);
	if (pkt->dst == 0) {
		dev_err(dev->dev, "Unable to map dst");
		dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
			uint8_t last)
{
	struct dcp_op *ctx = dev->ctx;
	pkt->pkt1 = ctx->pkt1;
	pkt->pkt2 = ctx->pkt2;

	pkt->payload = (u32) dev->payload_base_dma;
	pkt->stat = 0;

	if (ctx->flags & DCP_CBC_INIT) {
		pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
		ctx->flags &= ~DCP_CBC_INIT;
	}

	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
	pkt->pkt1 |= DCP_PKT_IRQ;
	if (!last)
		pkt->pkt1 |= DCP_PKT_CHAIN;

	dev->pkt_produced++;

	dcp_write(dev, 1,
		dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
}

static void dcp_op_proceed(struct dcp_dev *dev)
{
	struct dcp_op *ctx = dev->ctx;
	struct dcp_hw_packet *pkt;

	while (ctx->walk.nbytes) {
		int err = 0;

		pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
		err = dcp_dma_map(dev, &ctx->walk, pkt);
		if (err) {
			dev->ctx->stat |= err;
			/* start timer to wait for already set up calls */
			mod_timer(&dev->watchdog,
				jiffies + msecs_to_jiffies(500));
			break;
		}

		err = ctx->walk.nbytes - pkt->size;
		ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);

		dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
		/* we have to wait if no space is left in buffer */
		if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
			break;
	}
	clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
}
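
/*
 * Ring bookkeeping, as used above: dcp_op_proceed() advances pkt_produced
 * and stops early when pkt_produced - pkt_consumed == DCP_MAX_PKG (ring
 * full); dcp_done_task() below advances pkt_consumed as packets complete
 * and restarts production while the last finished packet still carries
 * DCP_PKT_CHAIN.
 */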
static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
{
	struct dcp_op *ctx = dev->ctx;

	if (ctx->flags & DCP_NEW_KEY) {
		memcpy(dev->payload_base, ctx->key, ctx->keylen);
		ctx->flags &= ~DCP_NEW_KEY;
	}

	ctx->pkt1 = 0;
	ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
	ctx->pkt1 |= DCP_PKT_DECR_SEM;

	if (ctx->flags & DCP_OTP_KEY)
		ctx->pkt1 |= DCP_PKT_OTP_KEY;
	else
		ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;

	if (ctx->flags & DCP_ENC)
		ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;

	ctx->pkt2 = 0;
	if (ctx->flags & DCP_CBC)
		ctx->pkt2 |= DCP_PKT_MODE_CBC;

	dev->pkt_produced = 0;
	dev->pkt_consumed = 0;

	ctx->stat = 0;
	dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
	dcp_write(dev, (u32) dev->hw_phys_pkg,
		dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));

	set_bit(DCP_FLAG_PRODUCING, &dev->flags);

	if (use_walk) {
		ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
				ctx->req->src, ctx->req->nbytes);
		ablkcipher_walk_phys(ctx->req, &ctx->walk);
		dcp_op_proceed(dev);
	} else {
		dcp_op_one(dev, dev->hw_pkg[0], 1);
		clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
	}
}
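
/*
 * For reference: a CBC encrypt with a driver-supplied key leaves
 * dcp_op_start() with pkt1 = DCP_PKT_CIPHER_ENABLE | DCP_PKT_DECR_SEM |
 * DCP_PKT_PAYLOAD_KEY | DCP_PKG_CIPHER_ENCRYPT and pkt2 = DCP_PKT_MODE_CBC;
 * dcp_op_one() then ORs in DCP_PKT_IRQ, DCP_PKT_CHAIN on every packet but
 * the last, and DCP_PKT_CIPHER_INIT on the first packet of a chain.
 */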
static void dcp_done_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *)data;
	struct dcp_hw_packet *last_packet;
	int fin;
	fin = 0;

	for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
		last_packet->stat == 1;
		last_packet =
			dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {

		dcp_dma_unmap(dev, last_packet);
		last_packet->stat = 0;
		fin++;
	}
	/* the last call of this function already consumed this IRQ's packet */
	if (fin == 0)
		return;

	dev_dbg(dev->dev,
		"Packet(s) done with status %x; finished: %d, produced: %d, complete consumed: %d",
		dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);

	last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
	if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
		if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
			dcp_op_proceed(dev);
		return;
	}

	while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
		dcp_dma_unmap(dev,
			dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
	}

	if (dev->ctx->flags & DCP_OTP_KEY) {
		/* we used the miscdevice, no walk to finish */
		clear_bit(DCP_FLAG_BUSY, &dev->flags);
		return;
	}

	ablkcipher_walk_complete(&dev->ctx->walk);
	dev->ctx->req->base.complete(&dev->ctx->req->base,
			dev->ctx->stat);
	dev->ctx->req = NULL;
	/* in case there are other requests in the queue */
	tasklet_schedule(&dev->queue_task);
}
static void dcp_watchdog(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *)data;
	dev->ctx->stat |= dcp_read(dev,
			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

	dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);

	if (!dev->ctx->stat)
		dev->ctx->stat = -ETIMEDOUT;

	dcp_done_task(data);
}
static irqreturn_t dcp_common_irq(int irq, void *context)
{
	u32 msk;
	struct dcp_dev *dev = (struct dcp_dev *) context;

	del_timer(&dev->watchdog);

	msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
	dcp_clear(dev, msk, DCP_REG_STAT);
	if (msk == 0)
		return IRQ_NONE;

	dev->ctx->stat |= dcp_read(dev,
			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

	if (msk & DCP_STAT_CHAN_1)
		tasklet_schedule(&dev->done_task);

	return IRQ_HANDLED;
}

static irqreturn_t dcp_vmi_irq(int irq, void *context)
{
	return dcp_common_irq(irq, context);
}

static irqreturn_t dcp_irq(int irq, void *context)
{
	return dcp_common_irq(irq, context);
}
static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
{
	dev->ctx = ctx;

	if ((ctx->flags & DCP_CBC) && ctx->req->info) {
		ctx->flags |= DCP_CBC_INIT;
		memcpy(dev->payload_base + AES_KEYSIZE_128,
			ctx->req->info, AES_KEYSIZE_128);
	}

	dcp_op_start(dev, 1);
}
static void dcp_queue_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *) data;
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct dcp_op *ctx;
	struct dcp_dev_req_ctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->queue_lock, flags);

	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	spin_unlock_irqrestore(&dev->queue_lock, flags);

	if (!async_req)
		goto ret_nothing_done;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);
	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	if (!req->src || !req->dst)
		goto ret_nothing_done;

	ctx->flags |= rctx->mode;
	ctx->req = req;

	dcp_crypt(dev, ctx);

	return;

ret_nothing_done:
	clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
static int dcp_cra_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct dcp_op *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback)) {
		dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
			name);
		return PTR_ERR(ctx->fallback);
	}

	return 0;
}

static void dcp_cra_exit(struct crypto_tfm *tfm)
{
	struct dcp_op *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);

	ctx->fallback = NULL;
}
/* async interface */
static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int len)
{
	struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
	int ret = 0;

	ctx->keylen = len;
	ctx->flags = 0;
	if (len == AES_KEYSIZE_128) {
		if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
			memcpy(ctx->key, key, len);
			ctx->flags |= DCP_NEW_KEY;
		}
		return 0;
	}

	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
{
	struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct dcp_dev *dev = global_dev;
	unsigned long flags;
	int err = 0;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
		return -EINVAL;

	rctx->mode = mode;

	spin_lock_irqsave(&dev->queue_lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->queue_lock, flags);

	flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);

	if (!(flags & DCP_FLAG_BUSY))
		tasklet_schedule(&dev->queue_task);

	return err;
}

static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_op *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		int err = 0;
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
}

static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_op *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		int err = 0;
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
}
static struct crypto_alg algs[] = {
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "dcp-cbc-aes",
		.cra_alignmask = 3,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_priority = 300,
		.cra_u.ablkcipher = {
			.min_keysize = AES_KEYSIZE_128,
			.max_keysize = AES_KEYSIZE_128,
			.setkey = dcp_aes_setkey,
			.encrypt = dcp_aes_cbc_encrypt,
			.decrypt = dcp_aes_cbc_decrypt,
			.ivsize = AES_BLOCK_SIZE,
		}
	},
};
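
/*
 * A sketch of how another kernel user would reach this algorithm through
 * the ablkcipher API of the same era (illustrative, not part of dcp.c;
 * src_sg, dst_sg, iv and key are placeholders supplied by the caller):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	// may return -EINPROGRESS (async)
 */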
/* DCP bootstream verification interface: uses OTP key for crypto */
static int dcp_bootstream_open(struct inode *inode, struct file *file)
{
	file->private_data = container_of((file->private_data),
			struct dcp_dev, dcp_bootstream_misc);
	return 0;
}

static long dcp_bootstream_ioctl(struct file *file,
					 unsigned int cmd, unsigned long arg)
{
	struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
	void __user *argp = (void __user *)arg;
	int ret;

	if (dev == NULL)
		return -EBADF;

	if (cmd != DBS_ENC && cmd != DBS_DEC)
		return -EINVAL;

	if (copy_from_user(dev->payload_base, argp, 16))
		return -EFAULT;

	if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
		return -EAGAIN;

	dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
	if (!dev->ctx) {
		dev_err(dev->dev,
			"cannot allocate context for OTP crypto");
		clear_bit(DCP_FLAG_BUSY, &dev->flags);
		return -ENOMEM;
	}

	dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
	dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
	dev->hw_pkg[0]->src = dev->payload_base_dma;
	dev->hw_pkg[0]->dst = dev->payload_base_dma;
	dev->hw_pkg[0]->size = 16;

	dcp_op_start(dev, 0);

	while (test_bit(DCP_FLAG_BUSY, &dev->flags))
		cpu_relax();

	ret = dev->ctx->stat;
	if (!ret && copy_to_user(argp, dev->payload_base, 16))
		ret = -EFAULT;

	kfree(dev->ctx);

	return ret;
}

static const struct file_operations dcp_bootstream_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dcp_bootstream_ioctl,
	.open = dcp_bootstream_open,
};
static int dcp_probe(struct platform_device *pdev)
{
	struct dcp_dev *dev = NULL;
	struct resource *r;
	int i, ret, j;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	global_dev = dev;
	dev->dev = &pdev->dev;

	platform_set_drvdata(pdev, dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
		return -ENXIO;
	}
	dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
					  resource_size(r));
	if (!dev->dcp_regs_base)
		return -ENOMEM;
	dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
	udelay(10);
	dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);

	dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
		DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
		DCP_REG_CTRL);

	dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);

	for (i = 0; i < 4; i++)
		dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));

	dcp_clear(dev, -1, DCP_REG_STAT);

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
		return -EIO;
	}
	dev->dcp_vmi_irq = r->start;
	ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
	if (ret != 0) {
		dev_err(&pdev->dev, "can't request_irq (0)\n");
		return -EIO;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!r) {
		dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
		ret = -EIO;
		goto err_free_irq0;
	}
	dev->dcp_irq = r->start;
	ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
	if (ret != 0) {
		dev_err(&pdev->dev, "can't request_irq (1)\n");
		ret = -EIO;
		goto err_free_irq0;
	}

	dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
			&dev->hw_phys_pkg,
			GFP_KERNEL);
	if (!dev->hw_pkg[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		ret = -ENOMEM;
		goto err_free_irq1;
	}

	for (i = 1; i < DCP_MAX_PKG; i++) {
		dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
				+ i * sizeof(struct dcp_hw_packet);
		dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
	}
	dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;

	dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
			&dev->payload_base_dma, GFP_KERNEL);
	if (!dev->payload_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		ret = -ENOMEM;
		goto err_free_hw_packet;
	}
	tasklet_init(&dev->queue_task, dcp_queue_task,
		(unsigned long) dev);
	tasklet_init(&dev->done_task, dcp_done_task,
		(unsigned long) dev);
	spin_lock_init(&dev->queue_lock);

	crypto_init_queue(&dev->queue, 10);

	init_timer(&dev->watchdog);
	dev->watchdog.function = &dcp_watchdog;
	dev->watchdog.data = (unsigned long)dev;
	dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR;
	dev->dcp_bootstream_misc.name = "dcpboot";
	dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops;
	ret = misc_register(&dev->dcp_bootstream_misc);
	if (ret != 0) {
		dev_err(dev->dev, "Unable to register misc device\n");
		goto err_free_key_iv;
	}

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct dcp_op);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = dcp_cra_init;
		algs[i].cra_exit = dcp_cra_exit;
		if (crypto_register_alg(&algs[i])) {
			dev_err(&pdev->dev, "register algorithm failed\n");
			ret = -ENOMEM;
			goto err_unregister;
		}
	}
	dev_notice(&pdev->dev, "DCP crypto enabled.\n");
	return 0;
err_unregister:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	misc_deregister(&dev->dcp_bootstream_misc);
err_free_key_iv:
	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
			dev->payload_base_dma);
err_free_hw_packet:
	dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
		sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
		dev->hw_phys_pkg);
err_free_irq1:
	free_irq(dev->dcp_irq, dev);
err_free_irq0:
	free_irq(dev->dcp_vmi_irq, dev);

	return ret;
}
static int dcp_remove(struct platform_device *pdev)
{
	struct dcp_dev *dev;
	int j;
	dev = platform_get_drvdata(pdev);

	dma_free_coherent(&pdev->dev,
			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
			dev->hw_pkg[0], dev->hw_phys_pkg);

	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
			dev->payload_base_dma);

	free_irq(dev->dcp_irq, dev);
	free_irq(dev->dcp_vmi_irq, dev);

	tasklet_kill(&dev->done_task);
	tasklet_kill(&dev->queue_task);

	for (j = 0; j < ARRAY_SIZE(algs); j++)
		crypto_unregister_alg(&algs[j]);

	misc_deregister(&dev->dcp_bootstream_misc);

	return 0;
}
static struct of_device_id fs_dcp_of_match[] = {
	{ .compatible = "fsl-dcp"},
	{},
};

static struct platform_driver fs_dcp_driver = {
	.probe = dcp_probe,
	.remove = dcp_remove,
	.driver = {
		.name = "fsl-dcp",
		.owner = THIS_MODULE,
		.of_match_table = fs_dcp_of_match
	}
};

module_platform_driver(fs_dcp_driver);

MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
MODULE_LICENSE("GPL");