2 * Copyright (C) 2004 IBM Corporation
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Dave Safford <safford@watson.ibm.com>
7 * Reiner Sailer <sailer@watson.ibm.com>
8 * Kylene Hall <kjhall@us.ibm.com>
10 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
12 * Device driver for TCG/TCPA TPM (trusted platform module).
13 * Specifications at www.trustedcomputinggroup.org
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation, version 2 of the
20 * Note, the TPM chip is not interrupt driven (only polling)
21 * and can have very long timeouts (minutes!). Hence the unusual
26 #include <linux/poll.h>
27 #include <linux/slab.h>
28 #include <linux/jiffies.h>
29 #include <linux/mutex.h>
30 #include <linux/spinlock.h>
31 #include <linux/freezer.h>
/* Character-device constants: fixed misc minor for the first TPM and the
 * number of chip slots tracked in dev_mask. */
36 TPM_MINOR = 224, /* officially assigned */
38 TPM_NUM_DEVICES = 256,
/* Size of the duration lookup tables below, and the mask used to strip
 * the protected-command prefix from an ordinal before indexing. */
48 #define TPM_MAX_ORDINAL 243
49 #define TPM_MAX_PROTECTED_ORDINAL 12
50 #define TPM_PROTECTED_ORDINAL_MASK 0xFF
53 * Bug workaround - some TPM's don't flush the most
54 * recently changed pcr on suspend, so force the flush
55 * with an extend to the selected _unused_ non-volatile pcr.
57 static int tpm_suspend_pcr;
58 module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
59 MODULE_PARM_DESC(suspend_pcr,
60 "PCR to use for dummy writes to faciltate flush on suspend.");
/* Global registry of registered chips (RCU-traversed in tpm_chip_find_get()
 * and tpm_open()), the spinlock guarding list mutation, and the bitmap of
 * in-use device numbers. */
62 static LIST_HEAD(tpm_chip_list);
63 static DEFINE_SPINLOCK(driver_lock);
64 static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
67 * Array with one entry per ordinal defining the maximum amount
68 * of time the chip could take to return the result. The ordinal
69 * designation of short, medium or long is defined in a table in
70 * TCG Specification TPM Main Part 2 TPM Structures Section 17. The
71 * values of the SHORT, MEDIUM, and LONG durations are retrieved
72 * from the chip during initialization with a call to tpm_get_timeouts.
/* Durations for protected ordinals, indexed by the low byte of the ordinal
 * (see tpm_calc_ordinal_duration()).
 * NOTE(review): interior table entries appear elided in this view of the
 * source — confirm the full initializer against the original file. */
74 static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
75 TPM_UNDEFINED, /* 0 */
80 TPM_UNDEFINED, /* 5 */
/* Duration class (SHORT/MEDIUM/LONG/UNDEFINED) per unprotected ordinal;
 * indexed directly by ordinal number in tpm_calc_ordinal_duration().
 * NOTE(review): most entries appear elided in this view of the source —
 * confirm the full initializer against the original file. */
89 static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
90 TPM_UNDEFINED, /* 0 */
95 TPM_UNDEFINED, /* 5 */
145 TPM_UNDEFINED, /* 55 */
165 TPM_UNDEFINED, /* 75 */
175 TPM_UNDEFINED, /* 85 */
185 TPM_UNDEFINED, /* 95 */
190 TPM_MEDIUM, /* 100 */
195 TPM_UNDEFINED, /* 105 */
225 TPM_UNDEFINED, /* 135 */
235 TPM_UNDEFINED, /* 145 */
245 TPM_UNDEFINED, /* 155 */
255 TPM_UNDEFINED, /* 165 */
265 TPM_UNDEFINED, /* 175 */
270 TPM_MEDIUM, /* 180 */
275 TPM_MEDIUM, /* 185 */
280 TPM_UNDEFINED, /* 190 */
285 TPM_UNDEFINED, /* 195 */
300 TPM_MEDIUM, /* 210 */
305 TPM_UNDEFINED, /* 215 */
315 TPM_UNDEFINED, /* 225 */
325 TPM_UNDEFINED, /* 235 */
/* Timer callback armed by tpm_write(): if the reader never claims the
 * pending result, punt cleanup to process context via chip->work
 * (handled by timeout_work() below — timers run in atomic context and
 * cannot take chip->buffer_mutex directly). */
335 static void user_reader_timeout(unsigned long ptr)
337 struct tpm_chip *chip = (struct tpm_chip *) ptr;
339 schedule_work(&chip->work);
/* Workqueue handler: discard an unclaimed command result — clear the
 * data_pending flag and wipe the user buffer under buffer_mutex. */
342 static void timeout_work(struct work_struct *work)
344 struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
346 mutex_lock(&chip->buffer_mutex);
347 atomic_set(&chip->data_pending, 0);
348 memset(chip->data_buffer, 0, TPM_BUFSIZE);
349 mutex_unlock(&chip->buffer_mutex);
/* Mark the chip as freshly resumed: record the resume timestamp and set
 * the flag that makes resume_if_needed() wait out the post-resume
 * self-test before the next command is sent. */
352 static void needs_resume(struct tpm_chip *chip)
354 mutex_lock(&chip->resume_mutex);
355 chip->resume_time = jiffies;
356 chip->needs_resume = 1;
357 mutex_unlock(&chip->resume_mutex);
/* TPM 1.2 command ordinals, wire-format tags, and canned request headers
 * (all multi-byte fields are big-endian on the wire). */
360 #define TPM_ORD_CONTINUE_SELFTEST 83
361 #define CONTINUE_SELFTEST_RESULT_SIZE 10
363 #define TPM_INTERNAL_RESULT_SIZE 200
364 #define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
365 #define TPM_ORD_GET_CAP cpu_to_be32(101)
/* TPM_GetCapability request: 22 bytes = 10-byte header + cap + subcap_size
 * + 4-byte subcap (tpm_getcap() shrinks this when subcap is unused). */
367 static const struct tpm_input_header tpm_getcap_header = {
368 .tag = TPM_TAG_RQU_COMMAND,
369 .length = cpu_to_be32(22),
370 .ordinal = TPM_ORD_GET_CAP
/* TPM_ContinueSelfTest request: header only, no body. */
373 static struct tpm_input_header continue_selftest_header = {
374 .tag = TPM_TAG_RQU_COMMAND,
375 .length = cpu_to_be32(10),
376 .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
380 * Internal kernel interface to transmit TPM commands
/* Sends the command in buf and receives the reply into the same buffer,
 * serialized by chip->tpm_mutex. Returns the vendor recv() result
 * (reply length) or a negative errno. */
382 static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
389 if (bufsiz > TPM_BUFSIZE)
390 bufsiz = TPM_BUFSIZE;
/* Parse length and ordinal from the raw big-endian header
 * (bytes 2..5 and 6..9 of the command). */
392 count = be32_to_cpu(*((__be32 *) (buf + 2)));
393 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
/* Reject commands whose declared length exceeds the buffer. */
396 if (count > bufsiz) {
398 "invalid count value %x %zx \n", count, bufsiz);
402 mutex_lock(&chip->tpm_mutex);
404 if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
406 "tpm_transmit: tpm_send: error %zd\n", rc);
/* Interrupt-driven chips signal completion; otherwise poll the status
 * register until done, canceled, or the per-ordinal timeout expires. */
410 if (chip->vendor.irq)
413 stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
415 u8 status = chip->vendor.status(chip);
416 if ((status & chip->vendor.req_complete_mask) ==
417 chip->vendor.req_complete_val)
420 if ((status == chip->vendor.req_canceled)) {
421 dev_err(chip->dev, "Operation Canceled\n");
426 msleep(TPM_TIMEOUT); /* CHECK */
428 } while (time_before(jiffies, stop));
/* Timed out: cancel the in-flight command before bailing. */
430 chip->vendor.cancel(chip);
431 dev_err(chip->dev, "Operation Timed out\n");
436 rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
439 "tpm_transmit: tpm_recv: error %zd\n", rc);
441 mutex_unlock(&chip->tpm_mutex);
446 * tpm_continue_selftest -- run TPM's selftest
447 * @chip: TPM chip to use
449 * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
452 int tpm_continue_selftest(struct tpm_chip *chip)
455 struct tpm_cmd_t cmd;
457 cmd.header.in = continue_selftest_header;
/* NOTE(review): tpm_transmit() takes const char *buf; the usual
 * "(u8 *) &cmd" cast appears to be missing here — likely lost in
 * extraction, confirm against the original file. */
458 rc = tpm_transmit(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE);
461 EXPORT_SYMBOL_GPL(tpm_continue_selftest);
463 /* The maximum time in milliseconds that the TPM self test will take to
464 * complete. TODO(semenzato): 1s should be plenty for all TPMs, but how can we
/* Used by resume_if_needed() to decide whether the post-resume self-test
 * window has already elapsed. */
467 #define TPM_SELF_TEST_DURATION_MSEC 1000
/* Complete a deferred resume before the first post-resume command: if we
 * are still inside the self-test window, block on ContinueSelfTest so the
 * chip is ready; then clear the needs_resume flag. Serialized against
 * needs_resume() by resume_mutex. */
469 static void resume_if_needed(struct tpm_chip *chip)
471 mutex_lock(&chip->resume_mutex);
472 if (chip->needs_resume) {
473 /* If it's been TPM_SELF_TEST_DURATION_MSEC msec since resume,
474 * then selftest has completed and we don't need to wait.
476 if (jiffies - chip->resume_time <
477 msecs_to_jiffies(TPM_SELF_TEST_DURATION_MSEC)) {
478 dev_info(chip->dev, "waiting for TPM self test");
479 tpm_continue_selftest(chip);
481 chip->needs_resume = 0;
482 dev_info(chip->dev, "TPM delayed resume completed");
484 mutex_unlock(&chip->resume_mutex);
/* Wrapper around tpm_transmit() for struct-typed commands: finishes any
 * pending resume first, validates the reply length, and logs @desc with
 * the TPM return code on error. Returns the TPM return code (0 on
 * success) or a negative errno. */
487 static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
488 int len, const char *desc)
492 resume_if_needed(chip);
494 len = tpm_transmit(chip,(u8 *) cmd, len);
497 else if (len < TPM_HEADER_SIZE)
500 err = be32_to_cpu(cmd->header.out.return_code);
502 dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
508 * Returns max number of jiffies to wait
/* Map an ordinal to its duration class via the tables above, then to the
 * chip-reported jiffy value; unknown ordinals fall back to the default
 * duration. */
510 unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
513 int duration_idx = TPM_UNDEFINED;
516 if (ordinal < TPM_MAX_ORDINAL)
517 duration_idx = tpm_ordinal_duration[ordinal];
/* Protected ordinals: index the protected table by the low byte. */
518 else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
519 TPM_MAX_PROTECTED_ORDINAL)
521 tpm_protected_ordinal_duration[ordinal &
522 TPM_PROTECTED_ORDINAL_MASK];
524 if (duration_idx != TPM_UNDEFINED)
525 duration = chip->vendor.duration[duration_idx];
531 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/* SHA-1 digest size used for PCR values, and byte offset of the return
 * code within a raw reply. */
533 #define TPM_DIGEST_SIZE 20
534 #define TPM_RET_CODE_IDX 6
/* TPM_GetCapability capability areas, pre-swapped to wire (big-endian)
 * byte order so they can be assigned into requests directly. */
536 enum tpm_capabilities {
537 TPM_CAP_FLAG = cpu_to_be32(4),
538 TPM_CAP_PROP = cpu_to_be32(5),
539 CAP_VERSION_1_1 = cpu_to_be32(0x06),
540 CAP_VERSION_1_2 = cpu_to_be32(0x1A)
/* Sub-capability selectors, also pre-swapped to wire byte order. */
543 enum tpm_sub_capabilities {
544 TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
545 TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
546 TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
547 TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
548 TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
549 TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
550 TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
/* Issue TPM_GetCapability for @subcap_id and copy the result into @cap.
 * Version queries use the subcap_id directly as the capability area with
 * no subcap; flag/property queries pass subcap_id as the subcap. */
554 ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
557 struct tpm_cmd_t tpm_cmd;
559 struct tpm_chip *chip = dev_get_drvdata(dev);
561 tpm_cmd.header.in = tpm_getcap_header;
562 if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
563 tpm_cmd.params.getcap_in.cap = subcap_id;
564 /*subcap field not necessary */
565 tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0)
/* Subtracting be32 constants works here only because 22-4 causes no
 * borrow across byte boundaries (0x16 - 0x04 in the same byte). */;
566 tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
568 if (subcap_id == TPM_CAP_FLAG_PERM ||
569 subcap_id == TPM_CAP_FLAG_VOL)
570 tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
572 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
573 tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
574 tpm_cmd.params.getcap_in.subcap = subcap_id;
576 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
578 *cap = tpm_cmd.params.getcap_out.cap;
/* Fire a harmless GetCapability(timeouts) command purely to provoke an
 * interrupt from the chip (used by bus drivers during IRQ probing);
 * the reply contents are discarded. */
582 void tpm_gen_interrupt(struct tpm_chip *chip)
584 struct tpm_cmd_t tpm_cmd;
587 tpm_cmd.header.in = tpm_getcap_header;
588 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
589 tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
590 tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
592 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
593 "attempting to determine the timeouts");
/* Query the chip for its TIS timeouts (a-d) and command durations
 * (short/medium/long), convert them to jiffies, and stash them in
 * chip->vendor. Applies two quirk fixups for chips that report values
 * in the wrong unit (msec instead of usec). */
597 int tpm_get_timeouts(struct tpm_chip *chip)
599 struct tpm_cmd_t tpm_cmd;
600 struct timeout_t *timeout_cap;
601 struct duration_t *duration_cap;
604 unsigned int scale = 1;
606 tpm_cmd.header.in = tpm_getcap_header;
607 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
608 tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
609 tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
611 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
612 "attempting to determine the timeouts");
/* Sanity-check the reply: return code 0 and exactly four u32 payload
 * words after the size field. */
616 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
617 be32_to_cpu(tpm_cmd.header.out.length)
618 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
621 timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
622 /* Don't overwrite default if value is 0 */
623 timeout = be32_to_cpu(timeout_cap->a);
/* Quirk: a nonzero timeout_a below 1000 usec means the chip is really
 * reporting msec; scale all four values up and note the adjustment. */
624 if (timeout && timeout < 1000) {
625 /* timeouts in msec rather usec */
627 chip->vendor.timeout_adjusted = true;
630 chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
631 timeout = be32_to_cpu(timeout_cap->b);
633 chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
634 timeout = be32_to_cpu(timeout_cap->c);
636 chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
637 timeout = be32_to_cpu(timeout_cap->d);
639 chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
/* Second query: the three command duration classes. */
642 tpm_cmd.header.in = tpm_getcap_header;
643 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
644 tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
645 tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
647 rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
648 "attempting to determine the durations");
652 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
653 be32_to_cpu(tpm_cmd.header.out.length)
654 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
657 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
658 chip->vendor.duration[TPM_SHORT] =
659 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
660 chip->vendor.duration[TPM_MEDIUM] =
661 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
662 chip->vendor.duration[TPM_LONG] =
663 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
665 /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
666 * value wrong and apparently reports msecs rather than usecs. So we
667 * fix up the resulting too-small TPM_SHORT value to make things work.
668 * We also scale the TPM_MEDIUM and -_LONG values by 1000.
670 if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
671 chip->vendor.duration[TPM_SHORT] = HZ;
672 chip->vendor.duration[TPM_MEDIUM] *= 1000;
673 chip->vendor.duration[TPM_LONG] *= 1000;
674 chip->vendor.duration_adjusted = true;
675 dev_info(chip->dev, "Adjusting TPM timeout parameters.");
/* sysfs "enabled" attribute: 1 when the permanent disable flag is clear. */
681 ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
687 rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
688 "attempting to determine the permanent enabled state");
692 rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
695 EXPORT_SYMBOL_GPL(tpm_show_enabled);
/* sysfs "active" attribute: 1 when the permanent deactivated flag is clear. */
697 ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
703 rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
704 "attempting to determine the permanent active state");
708 rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
711 EXPORT_SYMBOL_GPL(tpm_show_active);
/* sysfs "owned" attribute: whether an owner is installed on the TPM. */
713 ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
719 rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
720 "attempting to determine the owner state");
724 rc = sprintf(buf, "%d\n", cap.owned);
727 EXPORT_SYMBOL_GPL(tpm_show_owned);
/* sysfs "temp_deactivated" attribute: volatile (until-reboot) deactivation
 * flag from the STCLEAR flags. */
729 ssize_t tpm_show_temp_deactivated(struct device * dev,
730 struct device_attribute * attr, char *buf)
735 rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
736 "attempting to determine the temporary state");
740 rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
743 EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
746 * tpm_chip_find_get - return tpm_chip for given chip number
/* Walk the chip list under RCU; TPM_ANY_NUM matches the first chip.
 * Pins the owning driver module via try_module_get() so the chip cannot
 * vanish while the caller uses it (caller must drop the reference). */
748 static struct tpm_chip *tpm_chip_find_get(int chip_num)
750 struct tpm_chip *pos, *chip = NULL;
753 list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
754 if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
757 if (try_module_get(pos->dev->driver->owner)) {
/* TPM_PCRRead request: 14 bytes = 10-byte header + 4-byte PCR index;
 * reply carries a 20-byte digest (30 bytes total). */
766 #define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
767 #define READ_PCR_RESULT_SIZE 30
768 static struct tpm_input_header pcrread_header = {
769 .tag = TPM_TAG_RQU_COMMAND,
770 .length = cpu_to_be32(14),
771 .ordinal = TPM_ORDINAL_PCRREAD
/* Read PCR @pcr_idx into @res_buf (TPM_DIGEST_SIZE bytes). Internal
 * helper: caller must already hold a reference to @chip. */
774 static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
777 struct tpm_cmd_t cmd;
779 cmd.header.in = pcrread_header;
780 cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
781 rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
782 "attempting to read a pcr value");
785 memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
791 * tpm_pcr_read - read a pcr value
792 * @chip_num: tpm idx # or ANY
793 * @pcr_idx: pcr idx to retrieve
794 * @res_buf: TPM_PCR value
795 * size of res_buf is 20 bytes (or NULL if you don't care)
797 * The TPM driver should be built-in, but for whatever reason it
798 * isn't, protect against the chip disappearing, by incrementing
799 * the module usage count.
/* Public in-kernel entry point: resolves the chip (pinning its module)
 * then delegates to __tpm_pcr_read(). */
801 int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
803 struct tpm_chip *chip;
806 chip = tpm_chip_find_get(chip_num);
809 rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
813 EXPORT_SYMBOL_GPL(tpm_pcr_read);
816 * tpm_pcr_extend - extend pcr value with hash
817 * @chip_num: tpm idx # or ANY (was garbled "AN&" in source)
818 * @pcr_idx: pcr idx to extend
819 * @hash: hash value used to extend pcr value
821 * The TPM driver should be built-in, but for whatever reason it
822 * isn't, protect against the chip disappearing, by incrementing
823 * the module usage count.
/* TPM_Extend request: 34 bytes = 10-byte header + 4-byte PCR index +
 * 20-byte digest; reply is also 34 bytes. */
825 #define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
826 #define EXTEND_PCR_RESULT_SIZE 34
827 static struct tpm_input_header pcrextend_header = {
828 .tag = TPM_TAG_RQU_COMMAND,
829 .length = cpu_to_be32(34),
830 .ordinal = TPM_ORD_PCR_EXTEND
/* Extend PCR @pcr_idx with the TPM_DIGEST_SIZE-byte @hash on the chip
 * identified by @chip_num (pins the chip's module for the duration). */
833 int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
835 struct tpm_cmd_t cmd;
837 struct tpm_chip *chip;
839 chip = tpm_chip_find_get(chip_num);
843 cmd.header.in = pcrextend_header;
844 cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
845 memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
846 rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
847 "attempting extend a PCR value");
852 EXPORT_SYMBOL_GPL(tpm_pcr_extend);
855 * tpm_do_selftest - have the TPM continue its selftest and wait until it
856 * can receive further commands
857 * @chip: TPM chip to use
859 * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
/* Kicks off ContinueSelfTest, then polls with PCR 0 reads (in delay_msec
 * steps, bounded by the ordinal's duration) until the chip stops
 * answering TPM_WARN_DOING_SELFTEST. */
862 int tpm_do_selftest(struct tpm_chip *chip)
865 u8 digest[TPM_DIGEST_SIZE];
867 unsigned int delay_msec = 1000;
868 unsigned long duration;
870 duration = tpm_calc_ordinal_duration(chip,
871 TPM_ORD_CONTINUE_SELFTEST);
873 loops = jiffies_to_msecs(duration) / delay_msec;
875 rc = tpm_continue_selftest(chip);
876 /* This may fail if there was no TPM driver during a suspend/resume
877 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
/* Probe readiness with a cheap PCR 0 read. */
883 rc = __tpm_pcr_read(chip, 0, digest);
884 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
886 "TPM is disabled/deactivated (0x%X)\n", rc);
887 /* TPM is disabled and/or deactivated; driver can
888 * proceed and TPM does handle commands for
889 * suspend/resume correctly
/* Any other non-selftest response means the TPM is ready (or broken)
 * — stop polling either way. */
893 if (rc != TPM_WARN_DOING_SELFTEST)
896 } while (--loops > 0);
900 EXPORT_SYMBOL_GPL(tpm_do_selftest);
/* In-kernel interface to submit a raw, caller-built TPM command @cmd of
 * @buflen bytes on chip @chip_num. */
902 int tpm_send(u32 chip_num, void *cmd, size_t buflen)
904 struct tpm_chip *chip;
907 chip = tpm_chip_find_get(chip_num);
911 rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
916 EXPORT_SYMBOL_GPL(tpm_send);
/* sysfs "pcrs" attribute: query the PCR count, then dump every PCR as
 * "PCR-NN: XX XX ..." one per line. */
918 ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
922 u8 digest[TPM_DIGEST_SIZE];
926 struct tpm_chip *chip = dev_get_drvdata(dev);
928 rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
929 "attempting to determine the number of PCRS");
933 num_pcrs = be32_to_cpu(cap.num_pcrs);
934 for (i = 0; i < num_pcrs; i++) {
935 rc = __tpm_pcr_read(chip, i, digest);
938 str += sprintf(str, "PCR-%02d: ", i);
939 for (j = 0; j < TPM_DIGEST_SIZE; j++)
940 str += sprintf(str, "%02X ", digest[j]);
941 str += sprintf(str, "\n");
945 EXPORT_SYMBOL_GPL(tpm_show_pcrs);
/* TPM_ReadPubek request: 30 bytes = 10-byte header + 20-byte anti-replay
 * nonce; the reply (pubkey structure + checksum) is 314 bytes.
 * NOTE(review): not declared static — confirm whether external users
 * exist or whether this should be static const. */
947 #define READ_PUBEK_RESULT_SIZE 314
948 #define TPM_ORD_READPUBEK cpu_to_be32(124)
949 struct tpm_input_header tpm_readpubek_header = {
950 .tag = TPM_TAG_RQU_COMMAND,
951 .length = cpu_to_be32(30),
952 .ordinal = TPM_ORD_READPUBEK
/* sysfs "pubek" attribute: read the public endorsement key and pretty-
 * print its algorithm, schemes, parameters, and 256-byte modulus. */
955 ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
959 struct tpm_cmd_t tpm_cmd;
964 struct tpm_chip *chip = dev_get_drvdata(dev);
966 tpm_cmd.header.in = tpm_readpubek_header;
967 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
968 "attempting to read the PUBEK");
/* Layout of the reply buffer being decoded below: */
973 ignore header 10 bytes
974 algorithm 32 bits (1 == RSA )
977 parameters (RSA 12->bytes: keybit, #primes, expbit)
980 ignore checksum 20 bytes
982 data = tpm_cmd.params.readpubek_out_buffer;
985 "Algorithm: %02X %02X %02X %02X\n"
986 "Encscheme: %02X %02X\n"
987 "Sigscheme: %02X %02X\n"
988 "Parameters: %02X %02X %02X %02X "
989 "%02X %02X %02X %02X "
990 "%02X %02X %02X %02X\n"
991 "Modulus length: %d\n"
993 data[0], data[1], data[2], data[3],
996 data[12], data[13], data[14], data[15],
997 data[16], data[17], data[18], data[19],
998 data[20], data[21], data[22], data[23],
999 be32_to_cpu(*((__be32 *) (data + 24))));
/* Modulus starts at offset 28; emit 16 bytes per output line. */
1001 for (i = 0; i < 256; i++) {
1002 str += sprintf(str, "%02X ", data[i + 28]);
1003 if ((i + 1) % 16 == 0)
1004 str += sprintf(str, "\n");
1010 EXPORT_SYMBOL_GPL(tpm_show_pubek);
/* sysfs "caps" attribute (TPM 1.1): manufacturer id plus TCG/firmware
 * version from the 1.1-style version capability. */
1013 ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
1020 rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
1021 "attempting to determine the manufacturer");
1024 str += sprintf(str, "Manufacturer: 0x%x\n",
1025 be32_to_cpu(cap.manufacturer_id));
1027 rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
1028 "attempting to determine the 1.1 version");
1032 "TCG version: %d.%d\nFirmware version: %d.%d\n",
1033 cap.tpm_version.Major, cap.tpm_version.Minor,
1034 cap.tpm_version.revMajor, cap.tpm_version.revMinor);
1037 EXPORT_SYMBOL_GPL(tpm_show_caps);
/* sysfs "caps" attribute (TPM 1.2): same as tpm_show_caps() but using
 * the 1.2-style version capability/structure. */
1039 ssize_t tpm_show_caps_1_2(struct device * dev,
1040 struct device_attribute * attr, char *buf)
1046 rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
1047 "attempting to determine the manufacturer");
1050 str += sprintf(str, "Manufacturer: 0x%x\n",
1051 be32_to_cpu(cap.manufacturer_id));
1052 rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
1053 "attempting to determine the 1.2 version");
1057 "TCG version: %d.%d\nFirmware version: %d.%d\n",
1058 cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
1059 cap.tpm_version_1_2.revMajor,
1060 cap.tpm_version_1_2.revMinor);
1063 EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
/* sysfs "durations" attribute: the three command durations in usec, with
 * a marker saying whether the quirk fixup in tpm_get_timeouts() ran.
 * A zero LONG duration means durations were never retrieved. */
1065 ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
1068 struct tpm_chip *chip = dev_get_drvdata(dev);
1070 if (chip->vendor.duration[TPM_LONG] == 0)
1073 return sprintf(buf, "%d %d %d [%s]\n",
1074 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
1075 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
1076 jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
1077 chip->vendor.duration_adjusted
1078 ? "adjusted" : "original");
/* sysfs "timeouts" attribute: the four TIS timeouts (a-d) in usec, with
 * the adjusted/original marker from tpm_get_timeouts(). */
1082 ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
1085 struct tpm_chip *chip = dev_get_drvdata(dev);
1087 return sprintf(buf, "%d %d %d %d [%s]\n",
1088 jiffies_to_usecs(chip->vendor.timeout_a),
1089 jiffies_to_usecs(chip->vendor.timeout_b),
1090 jiffies_to_usecs(chip->vendor.timeout_c),
1091 jiffies_to_usecs(chip->vendor.timeout_d),
1092 chip->vendor.timeout_adjusted
1093 ? "adjusted" : "original");
/* sysfs "cancel" attribute (write-only): cancel the in-flight TPM command
 * via the vendor hook. */
1097 ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
1098 const char *buf, size_t count)
1100 struct tpm_chip *chip = dev_get_drvdata(dev);
1104 chip->vendor.cancel(chip);
1107 EXPORT_SYMBOL_GPL(tpm_store_cancel);
/* Wait until the status bits in @mask are all set, up to @timeout jiffies.
 * IRQ-capable chips sleep on @queue (tolerating freezer-induced
 * -ERESTARTSYS); polling chips re-read status every TPM_TIMEOUT msec. */
1109 int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
1110 wait_queue_head_t *queue)
1116 /* check current status */
1117 status = chip->vendor.status(chip);
1118 if ((status & mask) == mask)
1121 stop = jiffies + timeout;
1123 if (chip->vendor.irq) {
/* Recompute remaining time each loop iteration; <= 0 means expired. */
1125 timeout = stop - jiffies;
1126 if ((long)timeout <= 0)
1128 rc = wait_event_interruptible_timeout(*queue,
1129 ((chip->vendor.status(chip)
/* Interrupted by the freezer, not a real signal: clear the fake
 * signal and retry rather than failing the wait. */
1134 if (rc == -ERESTARTSYS && freezing(current)) {
1135 clear_thread_flag(TIF_SIGPENDING);
/* Non-IRQ path: poll the status register until the deadline. */
1140 msleep(TPM_TIMEOUT);
1141 status = chip->vendor.status(chip);
1142 if ((status & mask) == mask)
1144 } while (time_before(jiffies, stop));
1148 EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
1150 * Device file system interface to the TPM
1152 * It's assured that the chip will be opened just once,
1153 * by the check of is_open variable, which is protected
/* open(): locate the chip by misc-device minor, enforce single-open via
 * the is_open bit, and allocate the per-open command buffer. */
1156 int tpm_open(struct inode *inode, struct file *file)
1158 int minor = iminor(inode);
1159 struct tpm_chip *chip = NULL, *pos;
1162 list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
1163 if (pos->vendor.miscdev.minor == minor) {
1165 get_device(chip->dev);
/* Only one process may hold the device open at a time. */
1174 if (test_and_set_bit(0, &chip->is_open)) {
1175 dev_dbg(chip->dev, "Another process owns this TPM\n");
1176 put_device(chip->dev);
1180 chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
1181 if (chip->data_buffer == NULL) {
1182 clear_bit(0, &chip->is_open);
1183 put_device(chip->dev);
1187 atomic_set(&chip->data_pending, 0);
1189 file->private_data = chip;
1192 EXPORT_SYMBOL_GPL(tpm_open);
1195 * Called on file close
/* Tear down the open state: stop the reader-timeout timer, flush any
 * queued cleanup work, free the buffer, and release the single-open bit
 * and device reference taken in tpm_open(). */
1197 int tpm_release(struct inode *inode, struct file *file)
1199 struct tpm_chip *chip = file->private_data;
1201 del_singleshot_timer_sync(&chip->user_read_timer);
1202 flush_work_sync(&chip->work);
1203 file->private_data = NULL;
1204 atomic_set(&chip->data_pending, 0);
1205 kfree(chip->data_buffer);
1206 clear_bit(0, &chip->is_open);
1207 put_device(chip->dev);
1210 EXPORT_SYMBOL_GPL(tpm_release);
/* write(): copy a command from userspace, transmit it synchronously, and
 * leave the reply in data_buffer for a subsequent read(). The reader has
 * 60 seconds to claim the result before user_reader_timeout() wipes it. */
1212 ssize_t tpm_write(struct file *file, const char __user *buf,
1213 size_t size, loff_t *off)
1215 struct tpm_chip *chip = file->private_data;
1216 size_t in_size = size, out_size;
1218 /* cannot perform a write until the read has cleared
1219 either via tpm_read or a user_read_timer timeout */
1220 while (atomic_read(&chip->data_pending) != 0)
1221 msleep(TPM_TIMEOUT);
1223 resume_if_needed(chip);
1225 mutex_lock(&chip->buffer_mutex);
/* Silently truncate oversized writes to the buffer size. */
1227 if (in_size > TPM_BUFSIZE)
1228 in_size = TPM_BUFSIZE;
1231 (chip->data_buffer, (void __user *) buf, in_size)) {
1232 mutex_unlock(&chip->buffer_mutex);
1236 /* atomic tpm command send and result receive */
1237 out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
1239 atomic_set(&chip->data_pending, out_size);
1240 mutex_unlock(&chip->buffer_mutex);
1242 /* Set a timeout by which the reader must come claim the result */
1243 mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
1247 EXPORT_SYMBOL_GPL(tpm_write);
/* read(): hand the pending command result to userspace, then wipe the
 * buffer. Cancels the reader-timeout timer and flushes any cleanup work
 * first so the result cannot be cleared underneath us. */
1249 ssize_t tpm_read(struct file *file, char __user *buf,
1250 size_t size, loff_t *off)
1252 struct tpm_chip *chip = file->private_data;
1256 del_singleshot_timer_sync(&chip->user_read_timer);
1257 flush_work_sync(&chip->work);
1258 ret_size = atomic_read(&chip->data_pending);
1259 /* TODO(wad): atomic_set should come AFTER the buffer is copied. */
1260 atomic_set(&chip->data_pending, 0);
1261 if (ret_size > 0) { /* relay data */
1262 ssize_t orig_ret_size = ret_size;
/* Short reads: give the caller only what fits, but wipe the whole
 * original result below. */
1263 if (size < ret_size)
1266 mutex_lock(&chip->buffer_mutex);
1267 rc = copy_to_user(buf, chip->data_buffer, ret_size);
1268 memset(chip->data_buffer, 0, orig_ret_size);
1272 mutex_unlock(&chip->buffer_mutex);
1277 EXPORT_SYMBOL_GPL(tpm_read);
/* Unregister a chip: pull it off the global list, remove the misc device
 * and sysfs group, tear down the BIOS event log, and drop the device
 * reference taken at registration. */
1279 void tpm_remove_hardware(struct device *dev)
1281 struct tpm_chip *chip = dev_get_drvdata(dev);
1284 dev_err(dev, "No device data found\n");
1288 spin_lock(&driver_lock);
1289 list_del_rcu(&chip->list);
1290 spin_unlock(&driver_lock);
1293 misc_deregister(&chip->vendor.miscdev);
1294 sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
1295 tpm_bios_log_teardown(chip->bios_dir);
1297 /* write it this way to be explicit (chip->dev == dev) */
1298 put_device(chip->dev);
1300 EXPORT_SYMBOL_GPL(tpm_remove_hardware);
/* TPM_SaveState request: header only; reply is the bare 10-byte header. */
1302 #define TPM_ORD_SAVESTATE cpu_to_be32(152)
1303 #define SAVESTATE_RESULT_SIZE 10
1305 static struct tpm_input_header savestate_header = {
1306 .tag = TPM_TAG_RQU_COMMAND,
1307 .length = cpu_to_be32(10),
1308 .ordinal = TPM_ORD_SAVESTATE
1312 * We are about to suspend. Save the TPM state
1313 * so that it can be restored.
1315 int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
1317 struct tpm_chip *chip = dev_get_drvdata(dev);
1318 struct tpm_cmd_t cmd;
1321 u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
1326 /* for buggy tpm, flush pcrs with extend to selected dummy */
/* tpm_suspend_pcr == 0 disables the workaround (see module param). */
1327 if (tpm_suspend_pcr) {
1328 cmd.header.in = pcrextend_header;
1329 cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
1330 memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
1332 rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
1333 "extending dummy pcr before suspend");
1336 /* now do the actual savestate */
1337 cmd.header.in = savestate_header;
1338 rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
1339 "sending savestate before suspend");
1342 EXPORT_SYMBOL_GPL(tpm_pm_suspend);
1345 * Resume from a power safe. The BIOS already restored
/* Resume handler: marks the chip so the first post-resume command waits
 * out the self-test (see needs_resume()/resume_if_needed()). */
1348 int tpm_pm_resume(struct device *dev)
1350 struct tpm_chip *chip = dev_get_drvdata(dev);
1358 EXPORT_SYMBOL_GPL(tpm_pm_resume);
1360 /* In case vendor provided release function, call it too.*/
/* Release vendor resources, free the chip's device-number slot, and free
 * the devname string allocated in tpm_register_hardware(). */
1362 void tpm_dev_vendor_release(struct tpm_chip *chip)
1364 if (chip->vendor.release)
1365 chip->vendor.release(chip->dev);
1367 clear_bit(chip->dev_num, dev_mask);
1368 kfree(chip->vendor.miscdev.name);
1370 EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
1374 * Once all references to platform device are down to 0,
1375 * release all allocated structures.
/* Installed as dev->release by tpm_register_hardware(); final teardown
 * when the embedded struct device's refcount hits zero. */
1377 void tpm_dev_release(struct device *dev)
1379 struct tpm_chip *chip = dev_get_drvdata(dev);
1381 tpm_dev_vendor_release(chip);
1386 EXPORT_SYMBOL_GPL(tpm_dev_release);
1389 * Called from tpm_<specific>.c probe function only for devices
1390 * the driver has determined it should claim. Prior to calling
1391 * this function the specific probe function has called pci_enable_device
1392 * upon errant exit from this function specific probe function should call
1393 * pci_disable_device
/* Allocate and initialize a tpm_chip, pick a device number, register the
 * misc device and sysfs attributes, set up the BIOS event log, and
 * publish the chip on the global list. Returns the chip, or NULL-ish on
 * failure (exact error path not fully visible in this view). */
1395 struct tpm_chip *tpm_register_hardware(struct device *dev,
1396 const struct tpm_vendor_specific *entry)
/* "tpm" + up to 3 digits + NUL fits in 7 bytes. */
1398 #define DEVNAME_SIZE 7
1401 struct tpm_chip *chip;
1403 /* Driver specific per-device data */
1404 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
1405 devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
1407 if (chip == NULL || devname == NULL)
1410 mutex_init(&chip->buffer_mutex);
1411 mutex_init(&chip->tpm_mutex);
1412 mutex_init(&chip->resume_mutex);
1413 INIT_LIST_HEAD(&chip->list);
1415 INIT_WORK(&chip->work, timeout_work);
1417 setup_timer(&chip->user_read_timer, user_reader_timeout,
1418 (unsigned long)chip);
1420 memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
/* First chip gets the officially-assigned TPM_MINOR; later chips get
 * dynamic minors. */
1422 chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
1424 if (chip->dev_num >= TPM_NUM_DEVICES) {
1425 dev_err(dev, "No available tpm device numbers\n");
1427 } else if (chip->dev_num == 0)
1428 chip->vendor.miscdev.minor = TPM_MINOR;
1430 chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
1432 set_bit(chip->dev_num, dev_mask);
1434 scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
1435 chip->vendor.miscdev.name = devname;
1437 chip->vendor.miscdev.parent = dev;
1438 chip->dev = get_device(dev);
/* Chain our release handler in front of the device's original one. */
1439 chip->release = dev->release;
1440 dev->release = tpm_dev_release;
1441 dev_set_drvdata(dev, chip);
1443 if (misc_register(&chip->vendor.miscdev)) {
1445 "unable to misc_register %s, minor %d\n",
1446 chip->vendor.miscdev.name,
1447 chip->vendor.miscdev.minor);
1448 put_device(chip->dev);
1452 if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
1453 misc_deregister(&chip->vendor.miscdev);
1454 put_device(chip->dev);
1459 chip->bios_dir = tpm_bios_log_setup(devname);
1461 /* Make chip available */
1462 spin_lock(&driver_lock);
1463 list_add_rcu(&chip->list, &tpm_chip_list);
1464 spin_unlock(&driver_lock);
/* Module identification boilerplate. */
1475 MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
1476 MODULE_DESCRIPTION("TPM Driver");
1477 MODULE_VERSION("2.0");
1478 MODULE_LICENSE("GPL");