ctrl.c

/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
#include "ctrl.h"
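
/*
 * caam_remove - tear down what caam_probe set up: shut down each job ring,
 * remove the debugfs hierarchy (when enabled), unmap the controller
 * registers and free the driver-private state.
 */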
static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	struct caam_full __iomem *topregs;
	int ring, ret = 0;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

	/* shut down JobRs */
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		irq_dispose_mapping(jrpriv->irq);
	}

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(&topregs->ctrl);

	kfree(ctrlpriv->jrdev);
	kfree(ctrlpriv);

	return ret;
}

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc)
{
	u32 *jump_cmd;

	init_job_desc(desc, 0);

	/* INIT RNG in non-test mode */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 OP_ALG_AS_INIT);

	/* wait for done */
	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
	set_jump_tgt_here(desc, jump_cmd);

	/*
	 * load 1 to clear written reg:
	 * resets the done interrupt and returns the RNG to idle.
	 */
	append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

	/* generate secure keys (non-test) */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 OP_ALG_RNG4_SK);
}

struct instantiate_result {
	struct completion completion;
	int err;
};
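
/*
 * Completion callback for the RNG4 instantiation job: log any CAAM status
 * code, store it for the submitter and signal the completion.
 */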
static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct instantiate_result *instantiation = context;

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	instantiation->err = err;
	complete(&instantiation->completion);
}
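
/*
 * Build the RNG4 instantiation descriptor, DMA-map it, enqueue it on the
 * given job ring and sleep until the job completes; returns 0 on success
 * or the job status / errno on failure.
 */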
static int instantiate_rng(struct device *jrdev)
{
	struct instantiate_result instantiation;
	dma_addr_t desc_dma;
	u32 *desc;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
		return -ENOMEM;
	}

	build_instantiation_desc(desc);
	desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
	init_completion(&instantiation.completion);
	ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
	if (!ret) {
		wait_for_completion_interruptible(&instantiation.completion);
		ret = instantiation.err;
		if (ret)
			dev_err(jrdev, "unable to instantiate RNG\n");
	}

	dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}

/*
 * By default, the TRNG runs for 200 clocks per sample;
 * 1600 clocks per sample generates better entropy.
 */
static void kick_trng(struct platform_device *pdev)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_full __iomem *topregs;
	struct rng4tst __iomem *r4tst;
	u32 val;

	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
	r4tst = &topregs->ctrl.r4tst[0];

	/* put RNG4 into program mode */
	setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
	/* 1600 clocks per sample */
	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count */
	wr_reg32(&r4tst->rtfrqmin, 400);
	/* max. freq. count */
	wr_reg32(&r4tst->rtfrqmax, 6400);
	/* put RNG4 into run mode */
	clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on the SEC_VID register.
 * @caam_id: the value of the SEC_VID register
 *
 * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
 **/
int caam_get_era(u64 caam_id)
{
	struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
	static const struct {
		u16 ip_id;
		u8 maj_rev;
		u8 era;
	} caam_eras[] = {
		{0x0A10, 1, 1},
		{0x0A10, 2, 2},
		{0x0A12, 1, 3},
		{0x0A14, 1, 3},
		{0x0A14, 2, 4},
		{0x0A16, 1, 4},
		{0x0A11, 1, 4}
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
		if (caam_eras[i].ip_id == sec_vid->ip_id &&
		    caam_eras[i].maj_rev == sec_vid->maj_rev)
			return caam_eras[i].era;

	return -ENOTSUPP;
}
EXPORT_SYMBOL(caam_get_era);

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, rspec;
	u64 caam_id;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_full __iomem *topregs;
	struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif
	u64 cha_vid;

	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	ctrlpriv->pdev = pdev;
	nprop = pdev->dev.of_node;

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		kfree(ctrlpriv);
		return -ENOMEM;
	}

	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;

	/* topregs used to derive pointers to CAAM sub-blocks only */
	topregs = (struct caam_full __iomem *)ctrl;

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register
	 */
	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			dma_set_mask(dev, DMA_BIT_MASK(40));
		else
			dma_set_mask(dev, DMA_BIT_MASK(36));
	} else {
		dma_set_mask(dev, DMA_BIT_MASK(32));
	}

	/*
	 * Detect and enable JobRs
	 * First, find out how many job rings are spec'ed, allocate references
	 * for all, then go probe each one.
	 */
	rspec = 0;
	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
		rspec++;
	if (!rspec) {
		/* for backward compatibility with older device trees */
		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
			rspec++;
	}

	ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
	if (ctrlpriv->jrdev == NULL) {
		iounmap(&topregs->ctrl);
		kfree(ctrlpriv);
		return -ENOMEM;
	}

	ring = 0;
	ctrlpriv->total_jobrs = 0;
	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
		caam_jr_probe(pdev, np, ring);
		ctrlpriv->total_jobrs++;
		ring++;
	}
	if (!ring) {
		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
			caam_jr_probe(pdev, np, ring);
			ctrlpriv->total_jobrs++;
			ring++;
		}
	}

	/* Check to see if QI present. If so, enable */
	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
				  CTPR_QI_MASK);
	if (ctrlpriv->qi_present) {
		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
		/* This is all that's required to physically enable QI */
		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		caam_remove(pdev);
		return -ENOMEM;
	}

	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);

	/*
	 * If SEC has RNG version >= 4 and the RNG state handle has not
	 * already been instantiated, do RNG instantiation
	 */
	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
	    !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
		kick_trng(pdev);
		ret = instantiate_rng(ctrlpriv->jrdev[0]);
		if (ret) {
			caam_remove(pdev);
			return ret;
		}

		/* Enable RDB bit so that RNG works faster */
		setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
	}

	/* NOTE: RTIC detection ought to go here, around Si time */

	/* Initialize queue allocator lock */
	spin_lock_init(&ctrlpriv->jr_alloc_lock);

	caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 caam_get_era(caam_id));
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
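
	/*
	 * Expose the controller's performance-monitor counters, fault
	 * registers and covering keys (KEK/TKEK/TDSK) read-only via debugfs.
	 */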
#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);

	/* Controller-level - performance monitor counters */
	ctrlpriv->ctl_rq_dequeued =
		debugfs_create_u64("rq_dequeued",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->req_dequeued);
	ctrlpriv->ctl_ob_enc_req =
		debugfs_create_u64("ob_rq_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_req);
	ctrlpriv->ctl_ib_dec_req =
		debugfs_create_u64("ib_rq_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_req);
	ctrlpriv->ctl_ob_enc_bytes =
		debugfs_create_u64("ob_bytes_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
	ctrlpriv->ctl_ob_prot_bytes =
		debugfs_create_u64("ob_bytes_protected",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
	ctrlpriv->ctl_ib_dec_bytes =
		debugfs_create_u64("ib_bytes_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
	ctrlpriv->ctl_ib_valid_bytes =
		debugfs_create_u64("ib_bytes_validated",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

	/* Controller level - global status values */
	ctrlpriv->ctl_faultaddr =
		debugfs_create_u64("fault_addr",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultaddr);
	ctrlpriv->ctl_faultdetail =
		debugfs_create_u32("fault_detail",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultdetail);
	ctrlpriv->ctl_faultstatus =
		debugfs_create_u32("fault_status",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->status);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR |
						S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;
}
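
/* Controller-level compatible strings this driver binds to */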
static struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);

static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.owner = THIS_MODULE,
		.of_match_table = caam_match,
	},
	.probe = caam_probe,
	.remove = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");