tpm_tis.c revision 9efa54f002cc03fdb4e9d8d508aa996af01c48d0
/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

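/*
 * Register layout: each locality owns its own 4 KiB page within the memory
 * window, so a register address is (locality << 12) | offset.  TIS_MEM_BASE
 * above is the fixed MMIO base used when the device is force-probed without
 * PNP/ACPI resources.
 */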
#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define	TPM_STS(l)			(0x0018 | ((l) << 12))
#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))

#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define	TPM_RID(l)			(0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);

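/*
 * Intel's integrated TPM (iTPM) identifies itself through the ACPI hardware
 * ID "INTC0102"; detecting it here lets the driver apply the STS_DATA_EXPECT
 * workaround automatically (see the itpm module parameter below).
 */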
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif

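/*
 * Returns the locality number if the TPM reports it both active and valid
 * in its ACCESS register, -1 otherwise; also caches the locality in
 * chip->vendor.locality.
 */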
static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

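/*
 * Claim a locality: write requestUse and wait, interrupt-driven if an IRQ is
 * configured and otherwise by polling, until the TPM grants it.  Returns the
 * locality on success or -1 on timeout.
 */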
static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for burstcount */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		}
		while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

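/*
 * The burst count is a 16-bit value read from bytes 1-2 of the STS register;
 * it tells how many FIFO bytes can be transferred before the status has to
 * be checked again.
 */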
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

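/*
 * Wait until all bits in 'mask' are set in the status register, either by
 * sleeping on the given wait queue (interrupt mode) or by polling every
 * TPM_TIMEOUT ms.  Returns 0 on success, -ETIME on timeout.
 */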
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue)
{
	unsigned long stop;
	long rc;
	u8 status;

	/* check current status */
	status = chip->vendor.status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
						      ((chip->vendor.status(chip)
						      & mask) == mask),
						      timeout);
		if (rc > 0)
			return 0;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		do {
			msleep(TPM_TIMEOUT);
			status = chip->vendor.status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}

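/* Drain the response FIFO into 'buf' in burst-count-sized chunks. */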
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;
	while (size < count &&
	       wait_for_stat(chip,
			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
			     chip->vendor.timeout_c,
			     &chip->vendor.read_queue)
	       == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.
							    locality));
	}
	return size;
}

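/*
 * A response begins with a 10 byte header (tag, then the big-endian total
 * length at offset 2, then the return code), so the header is read first to
 * learn how many bytes to expect in total.
 */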
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	if ((size =
	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	if ((size +=
	     recv_data(chip, &buf[TPM_HEADER_SIZE],
		       expected - TPM_HEADER_SIZE)) < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

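	/*
	 * Send all but the last byte first; the TPM should keep
	 * STS_DATA_EXPECT asserted until the final byte arrives.  Intel
	 * iTPMs are known not to do this, hence the itpm override.
	 */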
	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			      &chip->vendor.int_queue);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

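	/*
	 * The command ordinal sits at offset 6 of the request; it is used
	 * to pick an appropriate completion timeout for this command.
	 */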
	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
		if (wait_for_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		     tpm_calc_ordinal_duration(chip, ordinal),
		     &chip->vendor.read_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
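	/*
	 * A bare TPM_GetTicks request: tag TPM_TAG_RQU_COMMAND (0x00c1),
	 * total length 10 bytes, ordinal 0x000000f1; a small command with
	 * no operands, used here only to exercise the send path.
	 */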
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
	size_t len = sizeof(cmd_getticks);
	int rem_itpm = itpm;

	itpm = 0;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = 1;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->dev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	/* some TPMs need a break here otherwise they will not work
	 * correctly on the immediately subsequent command */
	msleep(chip->vendor.timeout_b);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}

static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,},
};

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.probed_irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

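	/*
	 * dataAvail wakes sleepers on read_queue; localityChange, stsValid
	 * and commandReady wake sleepers on int_queue.
	 */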
	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i, irq_s, irq_e;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		itpm = probe_itpm(chip);
		if (itpm < 0) {
			rc = -ENODEV;
			goto out_err;
		}
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");


	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* get the timeouts before testing for irqs */
	if (tpm_get_timeouts(chip)) {
		dev_err(dev, "Could not get TPM timeouts and durations\n");
		rc = -ENODEV;
		goto out_err;
	}

	if (tpm_do_selftest(chip)) {
		dev_err(dev, "TPM self test failed\n");
		rc = -ENODEV;
		goto out_err;
	}

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		irq_s =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));
		if (irq_s) {
			irq_e = irq_s;
		} else {
			irq_s = 3;
			irq_e = 15;
		}

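		/*
		 * Probe: install tis_int_probe() on each candidate line,
		 * ask the TPM to generate an interrupt and see which IRQ
		 * actually fires (recorded in vendor.probed_irq).
		 */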
		for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			chip->vendor.probed_irq = 0;

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			chip->vendor.irq = chip->vendor.probed_irq;

			/* free_irq will call into tis_int_probe;
			   clear all irqs we haven't seen while doing
			   tpm_gen_interrupt */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);


	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}

static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	/* reenable interrupts that device may have lost or
	   BIOS/firmware may have disabled */
	iowrite8(chip->vendor.irq, chip->vendor.iobase +
		 TPM_INT_VECTOR(chip->vendor.locality));

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}


#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
				      const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = 0;

	if (is_itpm(pnp_dev))
		itpm = 1;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(&dev->dev);
	if (!ret)
		tpm_do_selftest(chip);

	return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}


static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
	.remove = tpm_tis_pnp_remove,
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.owner		= THIS_MODULE,
	},
	.suspend = tpm_tis_suspend,
	.resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

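	/*
	 * "force" path: skip PNP/ACPI enumeration and register a plain
	 * platform device backed by the fixed TIS window at TIS_MEM_BASE.
	 */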
	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
		return PTR_ERR(pdev);
	if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&tis_drv);
	}
	return rc;
}

static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;
	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
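		/* Mask the global interrupt enable bit before freeing the IRQ. */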
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");