tpm_tis.c

/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10

enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
};

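/* TIS register offsets; each locality occupies its own 4 KB page, hence the
 * (l) << 12 stride below the fixed TIS_MEM_BASE window. */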
#define TPM_ACCESS(l)                   (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)               (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)               (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)               (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)                (0x0014 | ((l) << 12))
#define TPM_STS(l)                      (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)                (0x0024 | ((l) << 12))

#define TPM_DID_VID(l)                  (0x0F00 | ((l) << 12))
#define TPM_RID(l)                      (0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);

#ifdef CONFIG_ACPI
static int is_itpm(struct pnp_dev *dev)
{
        struct acpi_device *acpi = pnp_acpi_device(dev);
        struct acpi_hardware_id *id;

        list_for_each_entry(id, &acpi->pnp.ids, list) {
                if (!strcmp("INTC0102", id->id))
                        return 1;
        }

        return 0;
}
#else
static int is_itpm(struct pnp_dev *dev)
{
        return 0;
}
#endif

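/* The TIS spec defines localities 0-4 as separate access levels; a locality
 * belongs to us once both ACTIVE_LOCALITY and VALID are set in its
 * TPM_ACCESS register. */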
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop, timeout;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        stop = jiffies + chip->vendor.timeout_a;

        if (chip->vendor.irq) {
again:
                timeout = stop - jiffies;
                if ((long)timeout <= 0)
                        return -1;
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality
                                                       (chip, l) >= 0),
                                                      timeout);
                if (rc > 0)
                        return l;
                if (rc == -ERESTARTSYS && freezing(current)) {
                        clear_thread_flag(TIF_SIGPENDING);
                        goto again;
                }
        } else {
                /* poll for the locality to be granted */
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
        /* this causes the current command to be aborted */
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

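/* The burst count (bits 8-23 of TPM_STS, read below as STS+1 and STS+2) is
 * how many bytes the TPM can accept or return through the FIFO without
 * inserting wait states. */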
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        /* wait for burstcount */
        /* which timeout value, spec has 2 answers (c & d) */
        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) +
                                    2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}

static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
                         wait_queue_head_t *queue)
{
        unsigned long stop;
        long rc;
        u8 status;

        /* check current status */
        status = tpm_tis_status(chip);
        if ((status & mask) == mask)
                return 0;

        stop = jiffies + timeout;

        if (chip->vendor.irq) {
again:
                timeout = stop - jiffies;
                if ((long)timeout <= 0)
                        return -ETIME;
                rc = wait_event_interruptible_timeout(*queue,
                                                      ((tpm_tis_status
                                                        (chip) & mask) ==
                                                       mask), timeout);
                if (rc > 0)
                        return 0;
                if (rc == -ERESTARTSYS && freezing(current)) {
                        clear_thread_flag(TIF_SIGPENDING);
                        goto again;
                }
        } else {
                do {
                        msleep(TPM_TIMEOUT);
                        status = tpm_tis_status(chip);
                        if ((status & mask) == mask)
                                return 0;
                } while (time_before(jiffies, stop));
        }
        return -ETIME;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;

        while (size < count &&
               wait_for_stat(chip,
                             TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                             chip->vendor.timeout_c,
                             &chip->vendor.read_queue) == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.
                                                            locality));
        }
        return size;
}

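/* A TPM 1.2 response starts with a 10-byte header: 2-byte tag, 4-byte
 * big-endian total length (at offset 2, including the header itself) and a
 * 4-byte return code. */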
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read first 10 bytes, including tag, paramsize, and result */
        size = recv_data(chip, buf, TPM_HEADER_SIZE);
        if (size < TPM_HEADER_SIZE) {
                dev_err(chip->dev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        size += recv_data(chip, &buf[TPM_HEADER_SIZE],
                          expected - TPM_HEADER_SIZE);
        if (size < expected) {
                dev_err(chip->dev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->dev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
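/*
 * Send protocol: claim locality 0, get the TPM command-ready, stream all but
 * the last byte while the TPM still flags DATA_EXPECT, write the final byte,
 * confirm DATA_EXPECT has cleared, then set TPM_STS_GO to start execution.
 */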
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;
        u32 ordinal;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_stat
                    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
                     &chip->vendor.int_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                              &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase +
                 TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                      &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_stat
                    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                     tpm_calc_ordinal_duration(chip, ordinal),
                     &chip->vendor.read_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

static const struct file_operations tis_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
        NULL,
};

static struct attribute_group tis_attr_grp = {
        .attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = TPM_STS_COMMAND_READY,
        .attr_group = &tis_attr_grp,
        .miscdev = {
                    .fops = &tis_ops,
        },
};

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.probed_irq = irq;

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

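/*
 * Common init path for both the PNP and forced platform-device probes:
 * map the MMIO window, claim locality 0, announce the chip, report the
 * interface capabilities, then set up (or probe for) the interrupt line.
 */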
static int tpm_tis_init(struct device *dev, resource_size_t start,
                        resource_size_t len, unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i, irq_s, irq_e;
        struct tpm_chip *chip;

        chip = tpm_register_hardware(dev, &tpm_tis);
        if (!chip)
                return -ENODEV;

        chip->vendor.iobase = ioremap(start, len);
        if (!chip->vendor.iobase) {
                rc = -EIO;
                goto out_err;
        }

        /* Default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");

        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        /* get the timeouts before testing for irqs */
        tpm_get_timeouts(chip);

        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                irq_s =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));
                if (irq_s) {
                        irq_e = irq_s;
                } else {
                        irq_s = 3;
                        irq_e = 15;
                }

                for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (request_irq
                            (i, tis_int_probe, IRQF_SHARED,
                             chip->vendor.miscdev.name, chip) != 0) {
                                dev_info(chip->dev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        chip->vendor.probed_irq = 0;

                        /* Generate Interrupts */
                        tpm_gen_interrupt(chip);

                        chip->vendor.irq = chip->vendor.probed_irq;

                        /* free_irq will call into tis_int_probe;
                           clear all irqs we haven't seen while doing
                           tpm_gen_interrupt */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        free_irq(i, chip);
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (request_irq
                    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
                     chip->vendor.miscdev.name, chip) != 0) {
                        dev_info(chip->dev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        INIT_LIST_HEAD(&chip->vendor.list);
        spin_lock(&tis_lock);
        list_add(&chip->vendor.list, &tis_chips);
        spin_unlock(&tis_lock);

        tpm_continue_selftest(chip);

        return 0;
out_err:
        if (chip->vendor.iobase)
                iounmap(chip->vendor.iobase);
        tpm_remove_hardware(chip->dev);
        return rc;
}

static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        /* reenable interrupts that device may have lost or
           BIOS/firmware may have disabled */
        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                                      const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = 0;

        if (is_itpm(pnp_dev))
                itpm = 1;

        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        int ret;

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        ret = tpm_pm_resume(&dev->dev);
        if (!ret)
                tpm_continue_selftest(chip);

        return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);

        tpm_dev_vendor_release(chip);

        kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .suspend = tpm_tis_pnp_suspend,
        .resume = tpm_tis_pnp_resume,
        .remove = tpm_tis_pnp_remove,
};

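/* TIS_HID_USR_IDX points at the "User Specified" placeholder entry
 * (second-to-last slot of tpm_pnp_tbl); the hid= module parameter below
 * writes one extra HID into it at load time. */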
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        return tpm_pm_resume(&dev->dev);
}

static struct platform_driver tis_drv = {
        .driver = {
                   .name = "tpm_tis",
                   .owner = THIS_MODULE,
                   },
        .suspend = tpm_tis_suspend,
        .resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");

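/*
 * With CONFIG_PNP and force=0, probing is left to the PNP/ACPI table above;
 * force=1 instead registers a bare platform device at the fixed TIS_MEM_BASE
 * window.
 */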
static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force)
                return pnp_register_driver(&tis_pnp_driver);
#endif

        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;

        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&tis_drv);
                return PTR_ERR(pdev);
        }

        rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
        if (rc != 0) {
                platform_device_unregister(pdev);
                platform_driver_unregister(&tis_drv);
        }
        return rc;
}

static void __exit cleanup_tis(void)
{
        struct tpm_vendor_specific *i, *j;
        struct tpm_chip *chip;

        spin_lock(&tis_lock);
        list_for_each_entry_safe(i, j, &tis_chips, list) {
                chip = to_tpm_chip(i);
                tpm_remove_hardware(chip->dev);
                iowrite32(~TPM_GLOBAL_INT_ENABLE &
                          ioread32(chip->vendor.iobase +
                                   TPM_INT_ENABLE(chip->vendor.locality)),
                          chip->vendor.iobase +
                          TPM_INT_ENABLE(chip->vendor.locality));
                release_locality(chip, chip->vendor.locality, 1);
                if (chip->vendor.irq)
                        free_irq(chip->vendor.irq, chip);
                iounmap(i->iobase);
                list_del(&i->list);
        }
        spin_unlock(&tis_lock);
#ifdef CONFIG_PNP
        if (!force) {
                pnp_unregister_driver(&tis_pnp_driver);
                return;
        }
#endif
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);

MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");