tpm_ibmvtpm.c

/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <adlai@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev: vio device struct
 * @w1: first word
 * @w2: second word
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}

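/*
 * Callers below build a struct ibmvtpm_crq on the stack and hand it to
 * ibmvtpm_send_crq() as two 64-bit words (word[0]/word[1] or buf[0]/buf[1]),
 * which presumes the structure defined in tpm_ibmvtpm.h packs into exactly
 * 16 bytes; the hypervisor delivers those two words to the vTPM partition as
 * a single CRQ entry.
 */
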
/**
 * ibmvtpm_get_data - Retrieve ibm vtpm data
 * @dev: device struct
 *
 * Return value:
 *	vtpm device struct
 */
static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);

        if (chip)
                return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
        return NULL;
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip: tpm chip struct
 * @buf: buffer to read
 * @count: size of buffer
 *
 * Return value:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm;
        u16 len;
        int sig;

        ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
        if (sig)
                return -EINTR;

        len = ibmvtpm->res_len;

        if (count < len) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in recv: count=%ld, crq_size=%d\n",
                        count, len);
                return -EIO;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
        memset(ibmvtpm->rtce_buf, 0, len);
        ibmvtpm->res_len = 0;
        spin_unlock(&ibmvtpm->rtce_lock);
        return len;
}

/**
 * tpm_ibmvtpm_send - Send tpm request
 * @chip: tpm chip struct
 * @buf: buffer containing data to send
 * @count: size of buffer
 *
 * Return value:
 *	Number of bytes sent
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm;
        struct ibmvtpm_crq crq;
        u64 *word = (u64 *) &crq;
        int rc;

        ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        if (count > ibmvtpm->rtce_size) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in send: count=%ld, rtce_size=%d\n",
                        count, ibmvtpm->rtce_size);
                return -EIO;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
        crq.valid = (u8)IBMVTPM_VALID_CMD;
        crq.msg = (u8)VTPM_TPM_COMMAND;
        crq.len = (u16)count;
        crq.data = ibmvtpm->rtce_dma_handle;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
        if (rc != H_SUCCESS) {
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                rc = 0;
        } else
                rc = count;

        spin_unlock(&ibmvtpm->rtce_lock);
        return rc;
}

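/*
 * Note on the data path: the TPM command bytes are never carried in the CRQ
 * itself.  tpm_ibmvtpm_send() copies them into the DMA-mapped RTCE buffer and
 * the CRQ transports only the buffer's DMA handle and length; the response is
 * presumably written back into the same buffer by the hypervisor, and
 * tpm_ibmvtpm_recv() copies it out once the interrupt path sets res_len.
 */
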
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
        return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
        return 0;
}

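/*
 * The vTPM CRQ interface does not appear to expose a status or cancel
 * register: tpm_ibmvtpm_status() always returns 0, tpm_ibmvtpm_cancel() is a
 * no-op, and req_complete_mask/req_complete_val are both 0 in tpm_ibmvtpm
 * below, so command completion is signalled through the wait queue woken from
 * ibmvtpm_crq_process() rather than by polling a status byte.
 */
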
/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq crq;
        u64 *buf = (u64 *) &crq;
        int rc;

        crq.valid = (u8)IBMVTPM_VALID_CMD;
        crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *			     (note that this is the vtpm version, not the tpm version)
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq crq;
        u64 *buf = (u64 *) &crq;
        int rc;

        crq.valid = (u8)IBMVTPM_VALID_CMD;
        crq.msg = (u8)VTPM_GET_VERSION;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_version failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init failed rc=%d\n", rc);

        return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev: vio device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
        struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
        int rc = 0;

        free_irq(vdev->irq, ibmvtpm);

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
                         CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
        free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

        if (ibmvtpm->rtce_buf) {
                dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
                                 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
                kfree(ibmvtpm->rtce_buf);
        }

        tpm_remove_hardware(ibmvtpm->dev);
        kfree(ibmvtpm);

        return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev: vio device struct
 *
 * Return value:
 *	Number of bytes the driver needs to DMA map
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
        struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);

        return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev: device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
        struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
        struct ibmvtpm_crq crq;
        u64 *buf = (u64 *) &crq;
        int rc = 0;

        crq.valid = (u8)IBMVTPM_VALID_CMD;
        crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "tpm_ibmvtpm_suspend failed rc=%d\n", rc);

        return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm: ibm vtpm struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
        ibmvtpm->crq_queue.index = 0;

        return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
                                  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev: device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
        struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc) {
                dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
                return rc;
        }

        rc = vio_enable_interrupts(ibmvtpm->vdev);
        if (rc) {
                dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
                return rc;
        }

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                dev_err(dev, "Error send_init rc=%d\n", rc);

        return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return (status == 0);
}

static const struct file_operations ibmvtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *ibmvtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
        NULL,
};

static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };

static const struct tpm_vendor_specific tpm_ibmvtpm = {
        .recv = tpm_ibmvtpm_recv,
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
        .req_complete_mask = 0,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
        .attr_group = &ibmvtpm_attr_grp,
        .miscdev = { .fops = &ibmvtpm_ops, },
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
        .suspend = tpm_ibmvtpm_suspend,
        .resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	vtpm crq pointer
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
        struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

        if (crq->valid & VTPM_MSG_RES) {
                if (++crq_q->index == crq_q->num_entry)
                        crq_q->index = 0;
                smp_rmb();
        } else
                crq = NULL;
        return crq;
}

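/*
 * The response queue is a single zeroed page treated as a circular array of
 * crq_q->num_entry struct ibmvtpm_crq entries; an entry is live while its
 * valid byte has VTPM_MSG_RES set.  The smp_rmb() above is presumably paired
 * with the smp_wmb() in ibmvtpm_interrupt() below, so that the valid byte is
 * observed before the entry payload is read and before valid is cleared.
 */
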
/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq: crq to be processed
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                                struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        switch (crq->valid) {
        case VALID_INIT_CRQ:
                switch (crq->msg) {
                case INIT_CRQ_RES:
                        dev_info(ibmvtpm->dev, "CRQ initialized\n");
                        rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
                        if (rc)
                                dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
                        return;
                case INIT_CRQ_COMP_RES:
                        dev_info(ibmvtpm->dev,
                                 "CRQ initialization completed\n");
                        return;
                default:
                        dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
                        return;
                }
                return;
        case IBMVTPM_VALID_CMD:
                switch (crq->msg) {
                case VTPM_GET_RTCE_BUFFER_SIZE_RES:
                        if (crq->len <= 0) {
                                dev_err(ibmvtpm->dev, "Invalid rtce size\n");
                                return;
                        }
                        ibmvtpm->rtce_size = crq->len;
                        ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
                                                    GFP_KERNEL);
                        if (!ibmvtpm->rtce_buf) {
                                dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
                                return;
                        }

                        ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
                                ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
                                DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(ibmvtpm->dev,
                                              ibmvtpm->rtce_dma_handle)) {
                                kfree(ibmvtpm->rtce_buf);
                                ibmvtpm->rtce_buf = NULL;
                                dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
                        }

                        return;
                case VTPM_GET_VERSION_RES:
                        ibmvtpm->vtpm_version = crq->data;
                        return;
                case VTPM_TPM_COMMAND_RES:
                        /* length of the data in the rtce buffer */
                        ibmvtpm->res_len = crq->len;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
                        return;
                }
        }
        return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq: irq number to handle
 * @vtpm_instance: vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
        struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
        struct ibmvtpm_crq *crq;

        /* while loop is needed for initial setup (get version and
         * get rtce_size). There should be only one tpm request at any
         * given time.
         */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
                crq->valid = 0;
                smp_wmb();
        }
        return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev: vio device struct
 * @id: vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                             const struct vio_device_id *id)
{
        struct ibmvtpm_dev *ibmvtpm;
        struct device *dev = &vio_dev->dev;
        struct ibmvtpm_crq_queue *crq_q;
        struct tpm_chip *chip;
        int rc = -ENOMEM, rc1;

        chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
        if (!chip) {
                dev_err(dev, "tpm_register_hardware failed\n");
                return -ENODEV;
        }

        ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
        if (!ibmvtpm) {
                dev_err(dev, "kzalloc for ibmvtpm failed\n");
                goto cleanup;
        }

        crq_q = &ibmvtpm->crq_queue;
        crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
        if (!crq_q->crq_addr) {
                dev_err(dev, "Unable to allocate memory for crq_addr\n");
                goto cleanup;
        }

        crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
        ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                                                 CRQ_RES_BUF_SIZE,
                                                 DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
                dev_err(dev, "dma mapping failed\n");
                goto cleanup;
        }

        rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
                                ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
        if (rc == H_RESOURCE)
                rc = ibmvtpm_reset_crq(ibmvtpm);

        if (rc) {
                dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
                goto reg_crq_cleanup;
        }

        rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
                         tpm_ibmvtpm_driver_name, ibmvtpm);
        if (rc) {
                dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
                goto init_irq_cleanup;
        }

        rc = vio_enable_interrupts(vio_dev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto init_irq_cleanup;
        }

        init_waitqueue_head(&ibmvtpm->wq);

        crq_q->index = 0;

        ibmvtpm->dev = dev;
        ibmvtpm->vdev = vio_dev;
        TPM_VPRIV(chip) = (void *)ibmvtpm;

        spin_lock_init(&ibmvtpm->rtce_lock);

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_version(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        return rc;
init_irq_cleanup:
        do {
                rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
        } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
        dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
                         DMA_BIDIRECTIONAL);
cleanup:
        if (ibmvtpm) {
                if (crq_q->crq_addr)
                        free_page((unsigned long)crq_q->crq_addr);
                kfree(ibmvtpm);
        }

        tpm_remove_hardware(dev);

        return rc;
}

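/*
 * Note that probe returns before the RTCE buffer exists: the buffer is only
 * allocated and DMA-mapped in ibmvtpm_crq_process() once the
 * VTPM_GET_RTCE_BUFFER_SIZE_RES response arrives, which is presumably why
 * tpm_ibmvtpm_send()/tpm_ibmvtpm_recv() check rtce_buf and report
 * "ibmvtpm device is not ready" until that response has been handled.
 */
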
static struct vio_driver ibmvtpm_driver = {
        .id_table = tpm_ibmvtpm_device_table,
        .probe = tpm_ibmvtpm_probe,
        .remove = tpm_ibmvtpm_remove,
        .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
        .name = tpm_ibmvtpm_driver_name,
        .pm = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
        return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Teardown ibm vtpm module
 *
 * Return value:
 *	Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
        vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");