
/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for the NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The engine is programmed by means of commands which travel through a
 * command queue residing between the CPU and the BSEV block. The BSEV
 * engine has an internal RAM (VRAM) where the input plaintext, keys and
 * the IV have to be copied before starting the encrypt/decrypt operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"

#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)
#define FLAGS_BUSY		9
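/*
 * Note: unlike the mask values above, FLAGS_BUSY is a bit *number*; it is
 * only ever used with test_bit()/test_and_set_bit()/clear_bit() on dd->flags.
 */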

/*
 * Defines the maximum number of bytes the AES engine processes in one go,
 * which takes about 1 msec. The engine spends about 176 cycles per 16-byte
 * block, i.e. 11 cycles/byte. The CPU may occupy the BSE for at most 1 msec,
 * during which about 216K AVP/BSE cycles are available, so the engine can
 * process roughly 216000 / 11 ~= 19 KB in that window. Based on this,
 * AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16 KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000

/*
 * The key table length is 64 bytes
 * (This includes first up to 32 bytes of key + 16 bytes original initial
 * vector and 16 bytes updated initial vector)
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64

/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)

/* Define commands required for AES operation */
enum {
	CMD_BLKSTARTENGINE = 0x0E,
	CMD_DMASETUP = 0x10,
	CMD_DMACOMPLETE = 0x11,
	CMD_SETTABLE = 0x15,
	CMD_MEMDMAVD = 0x22,
};

/* Define sub-commands */
enum {
	SUBCMD_VRAM_SEL = 0x1,
	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
	SUBCMD_KEY_TABLE_SEL = 0x8,
};

/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12

/* command queue bit shifts */
enum {
	CMDQ_KEYTABLEADDR_SHIFT = 0,
	CMDQ_KEYTABLEID_SHIFT = 17,
	CMDQ_VRAMSEL_SHIFT = 23,
	CMDQ_TABLESEL_SHIFT = 24,
	CMDQ_OPCODE_SHIFT = 26,
};
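
/*
 * Illustrative helper, not called by the driver itself: it shows how a
 * SETTABLE command word that loads a key slot from VRAM is assembled from
 * the opcode and sub-command shifts above. aes_set_key() below builds
 * exactly this word inline.
 */
static inline u32 __maybe_unused aes_cmd_settable(int slot_num)
{
	return CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
	       SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
	       SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
	       (SUBCMD_KEY_TABLE_SEL | slot_num) << CMDQ_KEYTABLEID_SHIFT;
}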

/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM		4

#define AES_NR_KEYSLOTS		8
#define TEGRA_AES_QUEUE_LENGTH	50
#define DEFAULT_RNG_BLK_SZ	16

/* The command queue depth (DMASETUP, source address, BLKSTARTENGINE,
 * DMACOMPLETE; see aes_start_crypt()) */
#define AES_HW_MAX_ICQ_LENGTH	4

struct tegra_aes_slot {
	struct list_head node;
	int slot_num;
};

static struct tegra_aes_slot ssk = {
	.slot_num = SSK_SLOT_NUM,
};

struct tegra_aes_reqctx {
	unsigned long mode;
};

struct tegra_aes_dev {
	struct device *dev;
	void __iomem *io_base;
	dma_addr_t ivkey_phys_base;
	u8 *ivkey_base;
	struct clk *aes_clk;
	struct tegra_aes_ctx *ctx;
	int irq;
	unsigned long flags;
	struct completion op_complete;
	u32 *buf_in;
	dma_addr_t dma_buf_in;
	u32 *buf_out;
	dma_addr_t dma_buf_out;
	u8 *iv;
	u8 dt[DEFAULT_RNG_BLK_SZ];
	int ivlen;
	u64 ctr;
	spinlock_t lock;
	struct crypto_queue queue;
	struct tegra_aes_slot *slots;
	struct ablkcipher_request *req;
	size_t total;
	struct scatterlist *in_sg;
	size_t in_offset;
	struct scatterlist *out_sg;
	size_t out_offset;
};
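
/* There is a single BSEV engine in the SoC; all tfms share this instance. */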
static struct tegra_aes_dev *aes_dev;

struct tegra_aes_ctx {
	struct tegra_aes_dev *dd;
	unsigned long flags;
	struct tegra_aes_slot *slot;
	u8 key[AES_MAX_KEY_SIZE];
	size_t keylen;
};

static struct tegra_aes_ctx rng_ctx = {
	.flags = FLAGS_NEW_KEY,
	.keylen = AES_KEYSIZE_128,
};

/* free (unused) hardware key slots are kept on this list */
static struct list_head dev_list;
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;

extern unsigned long long tegra_chip_uid(void);

static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
	return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
	writel(val, dd->io_base + offset);
}
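
/*
 * Program one crypt operation: push the DMASETUP/BLKSTARTENGINE commands
 * into the internal command queue (ICQ), wait for the dma-complete
 * interrupt, then issue the final DMACOMPLETE command.
 */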
static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
			   int nblocks, int mode, bool upd_iv)
{
	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
	int i, eng_busy, icq_empty, ret;
	u32 value;

	/* reset all the interrupt bits */
	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

	/* enable error, dma xfer complete interrupts */
	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);

	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
	cmdq[1] = in_addr;
	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks - 1);
	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
	/* access SDRAM through AHB */
	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
		((dd->ctx->keylen * 8) <<
			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);

	if (mode & FLAGS_CBC) {
		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
			((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	} else if (mode & FLAGS_OFB) {
		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
	} else if (mode & FLAGS_RNG) {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
			TEGRA_AES_SECURE_RNG_ENB_FIELD);
	} else {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	}

	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
	INIT_COMPLETION(dd->op_complete);

	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
		do {
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy && !icq_empty);
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

	ret = wait_for_completion_timeout(&dd->op_complete,
					  msecs_to_jiffies(150));
	if (ret == 0) {
		dev_err(dd->dev, "timed out (0x%x)\n",
			aes_readl(dd, TEGRA_AES_INTR_STATUS));
		return -ETIMEDOUT;
	}

	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
	return 0;
}

static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
	if (slot->slot_num == SSK_SLOT_NUM)
		return;

	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}

static struct tegra_aes_slot *aes_find_key_slot(void)
{
	struct tegra_aes_slot *slot = NULL;

	spin_lock(&list_lock);
	if (!list_empty(&dev_list)) {
		slot = list_first_entry(&dev_list, struct tegra_aes_slot,
					node);
		list_del(&slot->node);
	}
	spin_unlock(&list_lock);

	return slot;
}
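
/*
 * Load the current context's key into the selected hardware key slot:
 * DMA the key table from SDRAM into VRAM, then issue a SETTABLE command
 * so the engine pulls the key from VRAM into its internal registers.
 * The bootloader-programmed SSK slot only needs to be selected.
 */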
static int aes_set_key(struct tegra_aes_dev *dd)
{
	u32 value, cmdq[2];
	struct tegra_aes_ctx *ctx = dd->ctx;
	int eng_busy, icq_empty, dma_busy;
	bool use_ssk = false;

	/* use ssk? */
	if (!dd->ctx->slot) {
		dev_dbg(dd->dev, "using ssk");
		dd->ctx->slot = &ssk;
		use_ssk = true;
	}

	/* enable key schedule generation in hardware */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

	/* select the key slot */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

	if (use_ssk)
		return 0;

	/* copy the key table from sdram to vram */
	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
			MEMDMA_NUM_WORDS_SHIFT;
	cmdq[1] = (u32)dd->ivkey_phys_base;

	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy && !icq_empty && dma_busy);

	/* settable command to get key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
			CMDQ_KEYTABLEID_SHIFT;
	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy && !icq_empty);

	return 0;
}
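
/*
 * Pull one request off the crypto queue and run it: program the key and
 * IV if they changed, then walk the src/dst scatterlists and crypt up to
 * AES_HW_DMA_BUFFER_SIZE_BYTES per DMA-mapped segment.
 */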
static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;
	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
	int ret = 0, nblocks, total;
	int count = 0;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;

	if (!dd)
		return -EINVAL;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return -ENODATA;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	dev_dbg(dd->dev, "%s: get new req\n", __func__);

	if (!req->src || !req->dst)
		return -EINVAL;

	/* take the mutex to access the aes hw */
	mutex_lock(&aes_lock);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	in_sg = dd->in_sg;
	out_sg = dd->out_sg;
	total = dd->total;

	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = (u8 *)req->info;
	dd->ivlen = crypto_ablkcipher_ivsize(tfm);

	/* assign new context to device */
	ctx->dd = dd;
	dd->ctx = ctx;

	if (ctx->flags & FLAGS_NEW_KEY) {
		/* copy the key */
		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
		memset(dd->ivkey_base + ctx->keylen, 0,
		       AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
		aes_set_key(dd);
		ctx->flags &= ~FLAGS_NEW_KEY;
	}

	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
		/*
		 * Set the IV in the aes hw slot. The hardware generates an
		 * updated IV only after one has been set in the slot, so
		 * the key and the IV are programmed in separate steps.
		 */
		memcpy(dd->buf_in, dd->iv, dd->ivlen);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      (u32)dd->dma_buf_out, 1,
				      FLAGS_CBC, false);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
	}

	while (total) {
		dev_dbg(dd->dev, "remain: %d\n", total);
		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			goto out;
		}

		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			goto out;
		}

		addr_in = sg_dma_address(in_sg);
		addr_out = sg_dma_address(out_sg);
		dd->flags |= FLAGS_FAST;
		count = min_t(int, sg_dma_len(in_sg), dma_max);
		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
				      dd->flags, true);

		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
		dd->flags &= ~FLAGS_FAST;

		dev_dbg(dd->dev, "out: copied %d\n", count);
		total -= count;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
	}

out:
	mutex_unlock(&aes_lock);

	dd->total = total;

	if (dd->req->base.complete)
		dd->req->base.complete(&dd->req->base, ret);

	dev_dbg(dd->dev, "%s: exit\n", __func__);
	return ret;
}
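
/*
 * Cache the key in the transform context; it is loaded into a hardware
 * key slot by aes_set_key() on the next request. If no key material is
 * given, the request falls back to the bootloader-programmed SSK slot.
 */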
static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_slot *key_slot;

	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
	    (keylen != AES_KEYSIZE_256)) {
		dev_err(dd->dev, "unsupported key size\n");
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	dev_dbg(dd->dev, "keylen: %d\n", keylen);

	ctx->dd = dd;

	if (key) {
		if (!ctx->slot) {
			key_slot = aes_find_key_slot();
			if (!key_slot) {
				dev_err(dd->dev, "no empty slot\n");
				return -ENOMEM;
			}

			ctx->slot = key_slot;
		}

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	}

	ctx->flags |= FLAGS_NEW_KEY;
	dev_dbg(dd->dev, "done\n");
	return 0;
}

static void aes_workqueue_handler(struct work_struct *work)
{
	struct tegra_aes_dev *dd = aes_dev;
	int ret;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		dev_err(dd->dev, "clock enable failed (%d)\n", ret);
		return;
	}

	/* empty the crypto queue and then return */
	do {
		ret = tegra_aes_handle_req(dd);
	} while (!ret);

	clk_disable_unprepare(dd->aes_clk);
}

static irqreturn_t aes_irq(int irq, void *dev_id)
{
	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
	int busy = test_bit(FLAGS_BUSY, &dd->flags);

	if (!busy) {
		dev_dbg(dd->dev, "spurious interrupt\n");
		return IRQ_NONE;
	}

	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
	if (value & TEGRA_AES_INT_ERROR_MASK)
		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
		complete(&dd->op_complete);
	else
		return IRQ_NONE;

	return IRQ_HANDLED;
}
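
/*
 * Queue a request and, if the engine is idle, kick the workqueue that
 * drains the queue. The actual crypt runs in aes_workqueue_handler().
 */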
static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct tegra_aes_dev *dd = aes_dev;
	unsigned long flags;
	int err = 0;
	int busy;

	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT),
		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!busy)
		queue_work(aes_wq, &aes_work);

	return err;
}

static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_OFB);
}
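
/*
 * ANSI X9.31-style generation: each 16-byte block of output is produced
 * by running the engine in RNG mode over the DT buffer with the seeded
 * key; DT is then bumped as a big-endian counter for the next block.
 */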
static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
				unsigned int dlen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	unsigned int remain = dlen;
	u8 *dest = rdata, *dt = dd->dt;
	int ret, i;

	/* take the mutex to access the aes hw */
	mutex_lock(&aes_lock);

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	/* the engine yields DEFAULT_RNG_BLK_SZ bytes per pass */
	while (remain) {
		unsigned int count = min_t(unsigned int, remain,
					   DEFAULT_RNG_BLK_SZ);

		memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      (u32)dd->dma_buf_out, 1, dd->flags,
				      true);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			dlen = ret;
			goto out;
		}
		memcpy(dest, dd->buf_out, count);
		dest += count;
		remain -= count;

		/* update the DT */
		for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
			dt[i] += 1;
			if (dt[i] != 0)
				break;
		}
	}

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return dlen;
}

static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
			       unsigned int slen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	struct tegra_aes_slot *key_slot;
	struct timespec ts;
	int ret = 0;
	u64 nsec, tmp[2];
	u8 *dt;

	if (!ctx || !dd) {
		pr_err("%s: ctx=%p, dd=%p\n", __func__, ctx, dd);
		return -EINVAL;
	}

	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dev_err(dd->dev, "seed size invalid\n");
		return -EINVAL;
	}
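
	/*
	 * Seed layout (see .seedsize below): 16 bytes of V, then the
	 * AES-128 key, then optionally an explicit 16-byte DT block.
	 */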
	/* take the mutex to access the aes hw */
	mutex_lock(&aes_lock);

	if (!ctx->slot) {
		key_slot = aes_find_key_slot();
		if (!key_slot) {
			dev_err(dd->dev, "no empty slot\n");
			mutex_unlock(&aes_lock);
			return -ENOMEM;
		}
		ctx->slot = key_slot;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->ctr = 0;

	ctx->keylen = AES_KEYSIZE_128;
	ctx->flags |= FLAGS_NEW_KEY;

	/* copy the key to the key slot */
	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
	       AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

	dd->iv = seed;
	dd->ivlen = slen;

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	aes_set_key(dd);

	/* set seed to the aes hw slot */
	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}

	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
	} else {
		getnstimeofday(&ts);
		nsec = timespec_to_ns(&ts);
		do_div(nsec, 1000);
		nsec ^= dd->ctr << 56;
		dd->ctr++;
		tmp[0] = nsec;
		tmp[1] = tegra_chip_uid();
		dt = (u8 *)tmp;
	}
	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}

static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_aes_ctx *ctx =
		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

	if (ctx && ctx->slot)
		aes_release_key_slot(ctx->slot);
}

static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ecb_encrypt,
			.decrypt = tegra_aes_ecb_decrypt,
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_cbc_encrypt,
			.decrypt = tegra_aes_cbc_decrypt,
		}
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ofb_encrypt,
			.decrypt = tegra_aes_ofb_decrypt,
		}
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
		.cra_type = &crypto_rng_type,
		.cra_u.rng = {
			.rng_make_random = tegra_aes_get_random,
			.rng_reset = tegra_aes_rng_reset,
			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
		}
	}
};

static int tegra_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i = 0, j;

	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		return err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
				 AES_NR_KEYSLOTS, GFP_KERNEL);
	if (dd->slots == NULL) {
		dev_err(dev, "unable to alloc slot struct.\n");
		goto out;
	}

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

	/* Get the module base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
		err = -ENODEV;
		goto out;
	}

	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap register space\n");
		err = -ENOMEM;
		goto out;
	}

	/* Initialize the vde clock */
	dd->aes_clk = clk_get(dev, "vde");
	if (IS_ERR(dd->aes_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = -ENODEV;
		goto out;
	}

	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
	if (err) {
		dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
		goto out;
	}

	/*
	 * Allocate contiguous memory for the hardware key table (key +
	 * original IV + updated IV). The key schedule itself is generated
	 * by the hardware (see aes_set_key()), so no memory is needed
	 * for it here.
	 */
	dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
					    &dd->ivkey_phys_base,
					    GFP_KERNEL);
	if (!dd->ivkey_base) {
		dev_err(dev, "can not allocate iv/key buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					&dd->dma_buf_in, GFP_KERNEL);
	if (!dd->buf_in) {
		dev_err(dev, "can not allocate dma-in buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					 &dd->dma_buf_out, GFP_KERNEL);
	if (!dd->buf_out) {
		dev_err(dev, "can not allocate dma-out buffer\n");
		err = -ENOMEM;
		goto out;
	}

	init_completion(&dd->op_complete);
	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!aes_wq) {
		dev_err(dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* get the irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: irq\n");
		err = -ENODEV;
		goto out;
	}
	dd->irq = res->start;

	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
			       IRQF_SHARED, "tegra-aes", dd);
	if (err) {
		dev_err(dev, "request_irq failed\n");
		goto out;
	}
	INIT_LIST_HEAD(&dev_list);
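
	/* populate the free key-slot list; the SSK slot stays reserved */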
	spin_lock(&list_lock);
	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
		if (i == SSK_SLOT_NUM)
			continue;
		dd->slots[i].slot_num = i;
		INIT_LIST_HEAD(&dd->slots[i].node);
		list_add_tail(&dd->slots[i].node, &dev_list);
	}
	spin_unlock(&list_lock);

	aes_dev = dd;
	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = tegra_aes_cra_init;
		algs[i].cra_exit = tegra_aes_cra_exit;

		err = crypto_register_alg(&algs[i]);
		if (err)
			goto out;
	}

	dev_info(dev, "registered");
	return 0;

out:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	if (dd->ivkey_base)
		dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
				  dd->ivkey_base, dd->ivkey_phys_base);
	if (dd->buf_in)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_in, dd->dma_buf_in);
	if (dd->buf_out)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_out, dd->dma_buf_out);
	if (!IS_ERR_OR_NULL(dd->aes_clk))
		clk_put(dd->aes_clk);
	if (aes_wq)
		destroy_workqueue(aes_wq);

	spin_lock(&list_lock);
	INIT_LIST_HEAD(&dev_list);
	spin_unlock(&list_lock);

	aes_dev = NULL;

	dev_err(dev, "%s: initialization failed.\n", __func__);
	return err;
}

static int tegra_aes_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	cancel_work_sync(&aes_work);
	destroy_workqueue(aes_wq);

	spin_lock(&list_lock);
	INIT_LIST_HEAD(&dev_list);
	spin_unlock(&list_lock);

	dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
			  dd->ivkey_base, dd->ivkey_phys_base);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_in, dd->dma_buf_in);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_out, dd->dma_buf_out);
	clk_put(dd->aes_clk);
	aes_dev = NULL;

	return 0;
}

static const struct of_device_id tegra_aes_of_match[] = {
	{ .compatible = "nvidia,tegra20-aes", },
	{ .compatible = "nvidia,tegra30-aes", },
	{ },
};

static struct platform_driver tegra_aes_driver = {
	.probe	= tegra_aes_probe,
	.remove	= tegra_aes_remove,
	.driver	= {
		.name	= "tegra-aes",
		.owner	= THIS_MODULE,
		.of_match_table = tegra_aes_of_match,
	},
};

module_platform_driver(tegra_aes_driver);

MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");