tpm_tis.c

/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10
enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
};
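
/*
 * Per the TIS specification, each locality has its own 4 KB bank of
 * registers, so a register address is the per-locality offset below
 * OR'd with (locality << 12).
 */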
#define TPM_ACCESS(l)           (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)       (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)       (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)       (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)        (0x0014 | ((l) << 12))
#define TPM_STS(l)              (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)        (0x0024 | ((l) << 12))
#define TPM_DID_VID(l)          (0x0F00 | ((l) << 12))
#define TPM_RID(l)              (0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
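
/*
 * Returns l (and caches it in chip->vendor.locality) if locality l is
 * currently active and valid, or -1 if it is not ours.
 */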
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}
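
/*
 * Give up the active locality, either unconditionally (force) or only
 * when another requester is pending on it.
 */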
static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
                     (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}
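
/*
 * Request locality l and wait for it to be granted, sleeping on
 * int_queue when an interrupt is available and polling otherwise, for
 * at most timeout_a.
 */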
static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        if (chip->vendor.irq) {
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality(chip, l) >= 0),
                                                      chip->vendor.timeout_a);
                if (rc > 0)
                        return l;
        } else {
                /* wait for the locality to be granted */
                stop = jiffies + chip->vendor.timeout_a;
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
}
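
/* Read the 8-bit status register of the currently active locality. */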
static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
        /* this causes the current command to be aborted */
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
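
/*
 * Read the 16-bit burst count from STS register bytes 1 and 2: the
 * number of bytes the FIFO will accept or provide without further
 * handshaking.  Polls for up to timeout_d for it to become non-zero.
 */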
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        /* wait for burstcount */
        /* which timeout value, spec has 2 answers (c & d) */
        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) + 2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}
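
/*
 * Wait until all bits in mask are set in the status register, either
 * by sleeping on the given wait queue (interrupt mode) or by polling,
 * for at most the given timeout.
 */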
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
                         wait_queue_head_t *queue)
{
        unsigned long stop;
        long rc;
        u8 status;

        /* check current status */
        status = tpm_tis_status(chip);
        if ((status & mask) == mask)
                return 0;

        if (chip->vendor.irq) {
                rc = wait_event_interruptible_timeout(*queue,
                                                      ((tpm_tis_status(chip) & mask) == mask),
                                                      timeout);
                if (rc > 0)
                        return 0;
        } else {
                stop = jiffies + timeout;
                do {
                        msleep(TPM_TIMEOUT);
                        status = tpm_tis_status(chip);
                        if ((status & mask) == mask)
                                return 0;
                } while (time_before(jiffies, stop));
        }
        return -ETIME;
}
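
/*
 * Drain up to count bytes from the data FIFO, one burst at a time, for
 * as long as the TPM reports valid data available.
 */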
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;

        while (size < count &&
               wait_for_stat(chip,
                             TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                             chip->vendor.timeout_c,
                             &chip->vendor.read_queue) == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.locality));
        }
        return size;
}
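
/*
 * Receive a complete response: read the 10-byte header first, take the
 * total length from the big-endian paramsize field at offset 2, then
 * read the remainder.  Leftover data afterwards is treated as an error.
 * The chip is always returned to the ready state and the locality
 * released on exit.
 */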
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read first 10 bytes, including tag, paramsize, and result */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->dev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->dev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->dev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;
        u32 ordinal;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_stat(chip, TPM_STS_COMMAND_READY,
                                  chip->vendor.timeout_b,
                                  &chip->vendor.int_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                              &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                  tpm_calc_ordinal_duration(chip, ordinal),
                                  &chip->vendor.read_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

static const struct file_operations tis_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
        .attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = TPM_STS_COMMAND_READY,
        .attr_group = &tis_attr_grp,
        .miscdev = {
                .fops = &tis_ops,
        },
};
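
/*
 * Probe-time interrupt handler: any pending TPM interrupt status on
 * this vector proves the vector works, so remember it in
 * chip->vendor.irq and acknowledge it.
 */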
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.irq = irq;

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}
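
/*
 * Run-time interrupt handler: wake readers on dataAvail, re-check the
 * active locality on a locality change, wake int_queue for the
 * stsValid/commandReady/locality events, then acknowledge the status
 * and read it back to flush the write.
 */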
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
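
/*
 * Common initialization for both the PNP and the forced platform path:
 * map the register window, claim locality 0, report the device ID and
 * interface capabilities, program the interrupt enables and, if no IRQ
 * was handed in, probe IRQs 3-15 for a working vector before
 * registering the interrupt handler and the chip itself.
 */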
static int tpm_tis_init(struct device *dev, resource_size_t start,
                        resource_size_t len, unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i;
        struct tpm_chip *chip;

        if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
                return -ENODEV;

        chip->vendor.iobase = ioremap(start, len);
        if (!chip->vendor.iobase) {
                rc = -EIO;
                goto out_err;
        }

        /* Default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");

        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                chip->vendor.irq =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));

                for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (request_irq(i, tis_int_probe, IRQF_SHARED,
                                        chip->vendor.miscdev.name,
                                        chip) != 0) {
                                dev_info(chip->dev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        /* Generate Interrupts */
                        tpm_gen_interrupt(chip);

                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        free_irq(i, chip);
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (request_irq(chip->vendor.irq, tis_int_handler,
                                IRQF_SHARED, chip->vendor.miscdev.name,
                                chip) != 0) {
                        dev_info(chip->dev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        INIT_LIST_HEAD(&chip->vendor.list);
        spin_lock(&tis_lock);
        list_add(&chip->vendor.list, &tis_chips);
        spin_unlock(&tis_lock);

        tpm_get_timeouts(chip);
        tpm_continue_selftest(chip);

        return 0;
out_err:
        if (chip->vendor.iobase)
                iounmap(chip->vendor.iobase);
        tpm_remove_hardware(chip->dev);
        return rc;
}
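
/*
 * PNP/ACPI binding: memory and IRQ resources come from the firmware
 * entry; without a valid IRQ the driver falls back to polling.
 */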
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                                      const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = 0;

        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
        return tpm_pm_resume(&dev->dev);
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);

        tpm_dev_vendor_release(chip);
        kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .suspend = tpm_tis_pnp_suspend,
        .resume = tpm_tis_pnp_resume,
        .remove = tpm_tis_pnp_remove,
};
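
/*
 * TIS_HID_USR_IDX is the "User Specified" slot just before the table
 * terminator, so the hid= module parameter writes an extra HID
 * directly into tpm_pnp_tbl.
 */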
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");

static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
        return tpm_pm_resume(&dev->dev);
}

static struct platform_driver tis_drv = {
        .driver = {
                .name = "tpm_tis",
                .owner = THIS_MODULE,
        },
        .suspend = tpm_tis_suspend,
        .resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
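
/*
 * With force=1 the driver skips PNP/ACPI enumeration and registers a
 * platform device at the fixed TIS_MEM_BASE address instead.
 */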
static int __init init_tis(void)
{
        int rc;

        if (force) {
                rc = platform_driver_register(&tis_drv);
                if (rc < 0)
                        return rc;
                pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
                if (IS_ERR(pdev))
                        return PTR_ERR(pdev);
                rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
                if (rc != 0) {
                        platform_device_unregister(pdev);
                        platform_driver_unregister(&tis_drv);
                }
                return rc;
        }

        return pnp_register_driver(&tis_pnp_driver);
}
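
/*
 * Tear down every registered chip: unregister it from the TPM core,
 * mask the global interrupt enable, release the locality, free the IRQ
 * and unmap the register window, then drop the driver registration.
 */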
static void __exit cleanup_tis(void)
{
        struct tpm_vendor_specific *i, *j;
        struct tpm_chip *chip;

        spin_lock(&tis_lock);
        list_for_each_entry_safe(i, j, &tis_chips, list) {
                chip = to_tpm_chip(i);
                tpm_remove_hardware(chip->dev);
                iowrite32(~TPM_GLOBAL_INT_ENABLE &
                          ioread32(chip->vendor.iobase +
                                   TPM_INT_ENABLE(chip->vendor.locality)),
                          chip->vendor.iobase +
                          TPM_INT_ENABLE(chip->vendor.locality));
                release_locality(chip, chip->vendor.locality, 1);
                if (chip->vendor.irq)
                        free_irq(chip->vendor.irq, chip);
                iounmap(i->iobase);
                list_del(&i->list);
        }
        spin_unlock(&tis_lock);

        if (force) {
                platform_device_unregister(pdev);
                platform_driver_unregister(&tis_drv);
        } else
                pnp_unregister_driver(&tis_pnp_driver);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");