2 * Copyright (C) 2012 Red Hat, Inc.
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
8 * This file is released under the GPLv2.
10 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
11 * default prefetch value. Data are read in "prefetch_cluster" chunks from the
12 * hash device. Setting this greatly improves performance when data and hash
13 * are on the same disk on different partitions on devices with poor random
19 #include <linux/module.h>
20 #include <linux/async.h>
21 #include <linux/delay.h>
22 #include <linux/device-mapper.h>
23 #include <crypto/hash.h>
24 #include "dm-verity.h"
26 #define DM_MSG_PREFIX "verity"
28 #define DM_VERITY_IO_VEC_INLINE 16
29 #define DM_VERITY_MEMPOOL_SIZE 4
30 #define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
32 #define DM_VERITY_MAX_LEVELS 63
33 #define DM_VERITY_NUM_POSITIONAL_ARGS 10
35 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
37 module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
40 struct dm_dev *data_dev;
41 struct dm_dev *hash_dev;
43 struct dm_bufio_client *bufio;
45 struct crypto_shash *tfm;
46 u8 *root_digest; /* digest of the root block */
47 u8 *salt; /* salt: its size is salt_size */
49 sector_t data_start; /* data offset in 512-byte sectors */
50 sector_t hash_start; /* hash start in blocks */
51 sector_t data_blocks; /* the number of data blocks */
52 sector_t hash_blocks; /* the number of hash blocks */
53 unsigned char data_dev_block_bits; /* log2(data blocksize) */
54 unsigned char hash_dev_block_bits; /* log2(hash blocksize) */
55 unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
56 unsigned char levels; /* the number of tree levels */
57 unsigned char version;
58 unsigned digest_size; /* digest size for the current hash algorithm */
59 unsigned shash_descsize;/* the size of temporary space for crypto */
60 int hash_failed; /* set to 1 if hash of any block failed */
61 	int error_behavior;	/* selects error behavior on io errors */
63 mempool_t *io_mempool; /* mempool of struct dm_verity_io */
64 mempool_t *vec_mempool; /* mempool of bio vector */
66 struct workqueue_struct *verify_wq;
68 /* starting blocks for each tree level. 0 is the lowest level. */
69 sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
76 /* original values of bio->bi_end_io and bio->bi_private */
77 bio_end_io_t *orig_bi_end_io;
78 void *orig_bi_private;
83 /* saved bio vector */
84 struct bio_vec *io_vec;
87 struct work_struct work;
89 /* A space for short vectors; longer vectors are allocated separately. */
90 struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
93 * Three variably-size fields follow this struct:
95 * u8 hash_desc[v->shash_descsize];
96 * u8 real_digest[v->digest_size];
97 * u8 want_digest[v->digest_size];
99 * To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
103 struct dm_verity_prefetch_work {
104 struct work_struct work;
110 /* Provide a lightweight means of specifying the global default for
111 * error behavior: eio, reboot, or none
112 * Legacy support for 0 = eio, 1 = reboot/panic, 2 = none, 3 = notify.
113 * This is matched to the enum in dm-verity.h.
115 static const char *allowed_error_behaviors[] = { "eio", "panic", "none",
117 static char *error_behavior = "eio";
118 module_param(error_behavior, charp, 0644);
119 MODULE_PARM_DESC(error_behavior, "Behavior on error "
120 "(eio, panic, none, notify)");
122 /* Controls whether verity_get_device will wait forever for a device. */
124 module_param(dev_wait, int, 0444);
125 MODULE_PARM_DESC(dev_wait, "Wait forever for a backing device");
127 static BLOCKING_NOTIFIER_HEAD(verity_error_notifier);
129 int dm_verity_register_error_notifier(struct notifier_block *nb)
131 return blocking_notifier_chain_register(&verity_error_notifier, nb);
133 EXPORT_SYMBOL_GPL(dm_verity_register_error_notifier);
135 int dm_verity_unregister_error_notifier(struct notifier_block *nb)
137 return blocking_notifier_chain_unregister(&verity_error_notifier, nb);
139 EXPORT_SYMBOL_GPL(dm_verity_unregister_error_notifier);
141 /* If the request is not successful, this handler takes action.
142 * TODO make this call a registered handler.
static void verity_error(struct dm_verity *v, struct dm_verity_io *io,
	/* Logged and handed to notifiers; verity errors are integrity failures. */
	const char message[] = "integrity failure";
	/* Default to the strictest reaction unless something overrides it. */
	int error_behavior = DM_VERITY_ERROR_BEHAVIOR_PANIC;
	struct dm_verity_error_state error_state;
	devt = v->data_dev->bdev->bd_dev;
	error_behavior = v->error_behavior;
	DMERR_LIMIT("verification failure occurred: %s", message);
	if (error_behavior == DM_VERITY_ERROR_BEHAVIOR_NOTIFY) {
		/* Describe the failure for the registered notifier chain. */
		error_state.code = error;
		error_state.transient = 0;	/* hash mismatches are persistent */
		error_state.block = block;
		error_state.message = message;
		error_state.dev_start = v->data_start;
		error_state.dev_len = v->data_blocks;
		error_state.dev = v->data_dev->bdev;
		error_state.hash_dev_start = v->hash_start;
		error_state.hash_dev_len = v->hash_blocks;
		error_state.hash_dev = v->hash_dev->bdev;

		/* Set default fallthrough behavior. */
		error_state.behavior = DM_VERITY_ERROR_BEHAVIOR_PANIC;
		error_behavior = DM_VERITY_ERROR_BEHAVIOR_PANIC;

		/* A notifier may downgrade the reaction (e.g. to EIO or none). */
		if (!blocking_notifier_call_chain(
		    &verity_error_notifier, 0, &error_state)) {
			error_behavior = error_state.behavior;
	switch (error_behavior) {
	case DM_VERITY_ERROR_BEHAVIOR_EIO:
	case DM_VERITY_ERROR_BEHAVIOR_NONE:
	/* Any remaining behavior (panic/unknown) halts the system. */
	panic("dm-verity failure: "
	      "device:%u:%u error:%d block:%llu message:%s",
	      MAJOR(devt), MINOR(devt), error, (u64)block, message);
200 * verity_parse_error_behavior - parse a behavior charp to the enum
201 * @behavior: NUL-terminated char array
203 * Checks if the behavior is valid either as text or as an index digit
204 * and returns the proper enum value or -1 on error.
206 static int verity_parse_error_behavior(const char *behavior)
208 const char **allowed = allowed_error_behaviors;
211 for (; *allowed; allowed++, index++)
212 if (!strcmp(*allowed, behavior) || behavior[0] == index)
218 /* Convert to the integer index matching the enum. */
219 return allowed - allowed_error_behaviors;
222 static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
224 return (struct shash_desc *)(io + 1);
227 static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
229 return (u8 *)(io + 1) + v->shash_descsize;
232 static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
234 return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
238 * Auxiliary structure appended to each dm-bufio buffer. If the value
239 * hash_verified is nonzero, hash of the block has been verified.
241 * The variable hash_verified is set to 0 when allocating the buffer, then
242 * it can be changed to 1 and it is never reset to 0 again.
244 * There is no lock around this value, a race condition can at worst cause
245 * that multiple processes verify the hash of the same buffer simultaneously
246 * and write 1 to hash_verified simultaneously.
247 * This condition is harmless, so we don't need locking.
254 * Initialize struct buffer_aux for a freshly created buffer.
256 static void dm_bufio_alloc_callback(struct dm_buffer *buf)
258 struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
260 aux->hash_verified = 0;
264 * Translate input sector number to the sector number on the target device.
266 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
268 return v->data_start + dm_target_offset(v->ti, bi_sector);
272 * Return hash position of a specified block at a specified tree level
273 * (0 is the lowest level).
274 * The lowest "hash_per_block_bits"-bits of the result denote hash position
275 * inside a hash block. The remaining bits denote location of the hash block.
277 static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
280 return block >> (level * v->hash_per_block_bits);
283 static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
284 sector_t *hash_block, unsigned *offset)
286 sector_t position = verity_position_at_level(v, block, level);
289 *hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
294 idx = position & ((1 << v->hash_per_block_bits) - 1);
296 *offset = idx * v->digest_size;
298 *offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
302 * Verify hash of a metadata block pertaining to the specified data block
303 * ("block" argument) at a specified level ("level" argument).
305 * On successful return, io_want_digest(v, io) contains the hash value for
306  * a lower tree level or for the data block (if we're at the lowest level).
308 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
309 * If "skip_unverified" is false, unverified buffer is hashed and verified
310 * against current value of io_want_digest(v, io).
static int verity_verify_level(struct dm_verity_io *io, sector_t block,
			       int level, bool skip_unverified)
	struct dm_verity *v = io->v;
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	verity_hash_at_level(v, block, level, &hash_block, &offset);
	/* dm_bufio caches hash blocks; the read may come from cache. */
	data = dm_bufio_read(v->bufio, hash_block, &buf);
	if (unlikely(IS_ERR(data)))
		return PTR_ERR(data);
	aux = dm_bufio_get_aux_data(buf);
	/* aux->hash_verified is sticky: once 1, the block is never re-hashed. */
	if (!aux->hash_verified) {
		struct shash_desc *desc;
		if (skip_unverified) {
		desc = io_hash_desc(v, io);
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		r = crypto_shash_init(desc);
			DMERR("crypto_shash_init failed: %d", r);
		/* Format >= 1 salts before the data ... */
		if (likely(v->version >= 1)) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
				DMERR("crypto_shash_update failed: %d", r);
		r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
			DMERR("crypto_shash_update failed: %d", r);
			/* ... format 0 salts after the data. */
			r = crypto_shash_update(desc, v->salt, v->salt_size);
				DMERR("crypto_shash_update failed: %d", r);
		result = io_real_digest(v, io);
		r = crypto_shash_final(desc, result);
			DMERR("crypto_shash_final failed: %d", r);
		/* Compare computed digest against the expected one from the parent. */
		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
			DMERR_LIMIT("metadata block %llu is corrupted",
				    (unsigned long long)hash_block);
		aux->hash_verified = 1;
	/* Export this block's digest slot as the expected hash one level down. */
	memcpy(io_want_digest(v, io), data, v->digest_size);
	dm_bufio_release(buf);
	dm_bufio_release(buf);
401 * Verify one "dm_verity_io" structure.
static int verity_verify_io(struct dm_verity_io *io)
	struct dm_verity *v = io->v;
	unsigned vector = 0, offset = 0;
	/* Verify every data block covered by this io. */
	for (b = 0; b < io->n_blocks; b++) {
		struct shash_desc *desc;
		if (likely(v->levels)) {
			/*
			 * First, we try to get the requested hash for
			 * the current block. If the hash block itself is
			 * verified, zero is returned. If it isn't, this
			 * function returns 1 and we fall back to whole
			 * chain verification.
			 */
			int r = verity_verify_level(io, io->block + b, 0, true);
				goto test_block_hash;
		/* Full chain: start from the trusted root digest ... */
		memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);
		/* ... and verify each level top-down to the leaves. */
		for (i = v->levels - 1; i >= 0; i--) {
			int r = verity_verify_level(io, io->block + b, i, false);
		/* Hash the data block itself. */
		desc = io_hash_desc(v, io);
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		r = crypto_shash_init(desc);
			DMERR("crypto_shash_init failed: %d", r);
		if (likely(v->version >= 1)) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
				DMERR("crypto_shash_update failed: %d", r);
		todo = 1 << v->data_dev_block_bits;
			/* Feed the block from the saved bio vector, page by page. */
			BUG_ON(vector >= io->io_vec_size);
			bv = &io->io_vec[vector];
			page = kmap_atomic(bv->bv_page);
			len = bv->bv_len - offset;
			if (likely(len >= todo))
			r = crypto_shash_update(desc,
				page + bv->bv_offset + offset, len);
				DMERR("crypto_shash_update failed: %d", r);
			if (likely(offset == bv->bv_len)) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
				DMERR("crypto_shash_update failed: %d", r);
		result = io_real_digest(v, io);
		r = crypto_shash_final(desc, result);
			DMERR("crypto_shash_final failed: %d", r);
		/* Compare against the leaf digest obtained from the tree. */
		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
			DMERR_LIMIT("data block %llu is corrupted",
				    (unsigned long long)(io->block + b));
		/* All vectors must be consumed exactly once. */
		BUG_ON(vector != io->io_vec_size);
512 * End one "io" structure with a given error.
static void verity_finish_io(struct dm_verity_io *io, int error)
	struct bio *bio = io->bio;
	struct dm_verity *v = io->v;
	/* Report the failure (logging/notifier/panic per error_behavior). */
	verity_error(v, io, error);
	/* Restore the bio fields hijacked in verity_map(). */
	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_private = io->orig_bi_private;
	/* The inline vector lives inside io; only larger ones came from the mempool. */
	if (io->io_vec != io->io_vec_inline)
		mempool_free(io->io_vec, v->vec_mempool);
	mempool_free(io, v->io_mempool);
	bio_endio(bio, error);
532 static void verity_work(struct work_struct *w)
534 struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
536 verity_finish_io(io, verity_verify_io(io));
539 static void verity_end_io(struct bio *bio, int error)
541 struct dm_verity_io *io = bio->bi_private;
543 INIT_WORK(&io->work, verity_work);
544 queue_work(io->v->verify_wq, &io->work);
548 * Prefetch buffers for the specified io.
549 * The root buffer is not prefetched, it is assumed that it will be cached
static void verity_prefetch_io(struct work_struct *work)
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	/* Walk from the level just below the root down toward the leaves. */
	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;
		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
			/* Volatile read: the module parameter can change at any time. */
			unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster;
			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;
			/* Round a non-power-of-two cluster down to a power of two. */
			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << (fls(cluster) - 1);
			/* Align the prefetch window to the cluster, clamped to the tree. */
			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		dm_bufio_prefetch(v->bufio, hash_block_start,
				  hash_block_end - hash_block_start + 1);
586 static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
588 struct dm_verity_prefetch_work *pw;
590 pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
591 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
596 INIT_WORK(&pw->work, verity_prefetch_io);
598 pw->block = io->block;
599 pw->n_blocks = io->n_blocks;
600 queue_work(v->verify_wq, &pw->work);
604 * Bio map function. It allocates dm_verity_io structure and bio vector and
605 * fills them. Then it issues prefetches and the I/O.
static int verity_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;
	/* Redirect the bio to the backing data device. */
	bio->bi_bdev = v->data_dev->bdev;
	bio->bi_sector = verity_map_sector(v, bio->bi_sector);
	/* Both start and length must be block-aligned. */
	if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
	if ((bio->bi_sector + bio_sectors(bio)) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
	/* verity is read-only. */
	if (bio_data_dir(bio) == WRITE) {
	io = mempool_alloc(v->io_mempool, GFP_NOIO);
	/* Save the caller's completion so we can restore it in verity_finish_io(). */
	io->orig_bi_end_io = bio->bi_end_io;
	io->orig_bi_private = bio->bi_private;
	io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
	/* Intercept completion so verification runs before the caller sees data. */
	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	/* Snapshot the bio vector: small ones inline, larger from the mempool. */
	io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
		io->io_vec = io->io_vec_inline;
		io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
	memcpy(io->io_vec, bio_iovec(bio),
	       io->io_vec_size * sizeof(struct bio_vec));
	verity_submit_prefetch(v, io);
	generic_make_request(bio);
	return DM_MAPIO_SUBMITTED;
658 * Status: V (valid) or C (corruption found)
static int verity_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned maxlen)
	struct dm_verity *v = ti->private;
	case STATUSTYPE_INFO:
		/* 'C' once any block failed verification, otherwise 'V'. */
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
	case STATUSTYPE_TABLE:
		/* Emit the table line in the positional-argument order. */
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
			1 << v->data_dev_block_bits,
			1 << v->hash_dev_block_bits,
			(unsigned long long)v->data_blocks,
			(unsigned long long)v->hash_start,
		/* Root digest as lowercase hex. */
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		/* Salt as lowercase hex. */
		for (x = 0; x < v->salt_size; x++)
			DMEMIT("%02x", v->salt[x]);
static int verity_ioctl(struct dm_target *ti, unsigned cmd,
	struct dm_verity *v = ti->private;
	/* Only forward ioctls when the target spans the entire backing device. */
	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);
	/* Pass through to the underlying block driver unless verification failed. */
	return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
710 static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
711 struct bio_vec *biovec, int max_size)
713 struct dm_verity *v = ti->private;
714 struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
716 if (!q->merge_bvec_fn)
719 bvm->bi_bdev = v->data_dev->bdev;
720 bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
722 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
725 static int verity_iterate_devices(struct dm_target *ti,
726 iterate_devices_callout_fn fn, void *data)
728 struct dm_verity *v = ti->private;
730 return fn(ti, v->data_dev, v->data_start, ti->len, data);
733 static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
735 struct dm_verity *v = ti->private;
737 if (limits->logical_block_size < 1 << v->data_dev_block_bits)
738 limits->logical_block_size = 1 << v->data_dev_block_bits;
740 if (limits->physical_block_size < 1 << v->data_dev_block_bits)
741 limits->physical_block_size = 1 << v->data_dev_block_bits;
743 blk_limits_io_min(limits, limits->logical_block_size);
static void verity_dtr(struct dm_target *ti)
	struct dm_verity *v = ti->private;
	/* Tear down in reverse order of allocation in verity_ctr(). */
	destroy_workqueue(v->verify_wq);
	mempool_destroy(v->vec_mempool);
	mempool_destroy(v->io_mempool);
	dm_bufio_client_destroy(v->bufio);
	kfree(v->root_digest);
	crypto_free_shash(v->tfm);
	dm_put_device(ti, v->hash_dev);
	dm_put_device(ti, v->data_dev);
780 * match_dev_by_uuid - callback for finding a partition using its uuid
781 * @dev: device passed in by the caller
782 * @data: opaque pointer to a uuid packed by part_pack_uuid().
784 * Returns 1 if the device matches, and 0 otherwise.
static int match_dev_by_uuid(struct device *dev, void *data)
	struct hd_struct *part = dev_to_part(dev);
	/* Mismatch (or missing partition info) -> not a match. */
	if (memcmp(uuid, part->info->uuid, sizeof(part->info->uuid)))
803 * dm_get_device_by_uuid: claim a device using its UUID
804 * @ti: current dm_target
805 * @uuid_string: 36 byte UUID hex encoded
806 * (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)
807 * @dm_dev: dm_dev to populate
809 * Wraps dm_get_device allowing it to use a unique partition id to
810 * find a given partition on any drive. This code is based on
811 * printk_all_partitions in that it walks all of the register block devices.
813 * N.B., uuid_string is not checked for safety just strlen().
static int dm_get_device_by_uuid(struct dm_target *ti, const char *uuid_str,
				 struct dm_dev **dm_dev)
	struct device *dev = NULL;
	char devt_buf[BDEVT_SIZE];
	size_t uuid_length = strlen(uuid_str);
	/* A canonical textual UUID is exactly 36 characters. */
	if (uuid_length < 36)
	/* Pack the requested UUID in the expected format. */
	part_pack_uuid(uuid_str, uuid);
	/* Search all registered block devices for a matching partition. */
	dev = class_find_device(&block_class, NULL, uuid, &match_dev_by_uuid);
	/* The caller may specify +/-%u after the UUID if they want a partition
	 * before or after the one identified.
	 */
	if (uuid_length > 36) {
		unsigned int part_offset;
		unsigned minor = MINOR(devt);
		if (sscanf(uuid_str + 36, "%c%u", &sign, &part_offset) == 2) {
			minor += part_offset;
		} else if (sign == '-') {
			minor -= part_offset;
			DMWARN("Trailing characters after UUID: %s\n",
		devt = MKDEV(MAJOR(devt), minor);
	/* Construct the dev name to pass to dm_get_device. dm_get_device
	 * doesn't support being passed a dev_t.
	 */
	snprintf(devt_buf, sizeof(devt_buf), "%u:%u", MAJOR(devt), MINOR(devt));
	/* TODO(wad) to make this generic we could also pass in the mode. */
	if (!dm_get_device(ti, devt_buf, dm_table_get_mode(ti->table), dm_dev))
	/* Error exits below; each sets ti->error for the dm core to report. */
	ti->error = "Failed to acquire device";
	DMDEBUG("Failed to acquire discovered device %s", devt_buf);
	ti->error = "Bad UUID";
	DMDEBUG("Supplied value '%s' is an invalid UUID", uuid_str);
	DMDEBUG("No matching partition for GUID: %s", uuid_str);
	ti->error = "No matching GUID";
static int verity_get_device(struct dm_target *ti, const char *devname,
			     struct dm_dev **dm_dev)
	/* Try the normal path first since if everything is ready, it
	 * will be the fastest.
	 */
	if (!dm_get_device(ti, devname, /*FMODE_READ*/
			   dm_table_get_mode(ti->table), dm_dev))
	/* Try the device by partition UUID */
	if (!dm_get_device_by_uuid(ti, devname, dm_dev))
	/* No need to be too aggressive since this is a slow path. */
	/* With dev_wait set, retry until driver probing settles. */
	} while (dev_wait && (driver_probe_done() != 0 || *dm_dev == NULL));
	async_synchronize_full();
904 int data_block_size_bits;
905 int hash_block_size_bits;
907 u64 hash_start_block;
911 char *error_behavior;
/* Debug helper: dump the fully-parsed constructor arguments to the log. */
static void pr_args(struct verity_args *args)
	printk(KERN_INFO "VERITY args: version=%d data_device=%s hash_device=%s"
		" data_block_size_bits=%d hash_block_size_bits=%d"
		" num_data_blocks=%lld hash_start_block=%lld"
		" algorithm=%s digest=%s salt=%s error_behavior=%s\n",
		args->data_block_size_bits,
		args->hash_block_size_bits,
		args->num_data_blocks,
		args->hash_start_block,
		args->error_behavior);
934 * positional_args - collects the argments using the positional
939 * 2 - hash device - may be same as data device
940 * 3 - data block size log2
941 * 4 - hash block size log2
942 * 5 - number of data blocks
943 * 6 - hash start block
static char *positional_args(unsigned argc, char **argv,
			     struct verity_args *args)
	unsigned long long num_ll;
	if (argc != DM_VERITY_NUM_POSITIONAL_ARGS)
		return "Invalid argument count: exactly 10 arguments required";
	/* argv[0]: on-disk format version. */
	if (sscanf(argv[0], "%d%c", &num, &dummy) != 1 ||
		return "Invalid version";
	args->data_device = argv[1];
	args->hash_device = argv[2];
	/* argv[3]: data block size — must be a nonzero power of two. */
	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
		return "Invalid data device block size";
	args->data_block_size_bits = ffs(num) - 1;
	/* argv[4]: hash block size — same constraints. */
	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
		return "Invalid hash device block size";
	args->hash_block_size_bits = ffs(num) - 1;
	/* argv[5]: data block count; round-trip shift detects sector_t overflow. */
	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (args->data_block_size_bits - SECTOR_SHIFT))
	    >> (args->data_block_size_bits - SECTOR_SHIFT) != num_ll)
		return "Invalid data blocks";
	args->num_data_blocks = num_ll;
	/* argv[6]: first hash block; same overflow check. */
	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (args->hash_block_size_bits - SECTOR_SHIFT))
	    >> (args->hash_block_size_bits - SECTOR_SHIFT) != num_ll)
		return "Invalid hash start";
	args->hash_start_block = num_ll;
	/* argv[7..9]: hash algorithm name, root digest hex, salt hex (or "-"). */
	args->algorithm = argv[7];
	args->digest = argv[8];
	args->salt = argv[9];
/* Split "key=value" in place: *key gets the part before '=', *val the rest. */
static void splitarg(char *arg, char **key, char **val)
{
	*key = strsep(&arg, "=");
	*val = strsep(&arg, "");	/* remainder (NULL when no '=' present) */
}
static char *chromeos_args(unsigned argc, char **argv, struct verity_args *args)
	/* ChromeOS format fixes both block sizes at 4096 bytes. */
	args->data_block_size_bits = 12;
	args->hash_block_size_bits = 12;
	for (i = 0; i < argc; ++i) {
		DMWARN("Argument %d: '%s'", i, argv[i]);
		splitarg(argv[i], &key, &val);
			DMWARN("Bad argument %d: missing key?", i);
			return "Bad argument: missing key";
			DMWARN("Bad argument %d='%s': missing value", i, key);
			return "Bad argument: missing value";
		/* Dispatch on the key name. */
		if (!strcmp(key, "alg")) {
			args->algorithm = val;
		} else if (!strcmp(key, "payload")) {
			args->data_device = val;
		} else if (!strcmp(key, "hashtree")) {
			args->hash_device = val;
		} else if (!strcmp(key, "root_hexdigest")) {
		} else if (!strcmp(key, "hashstart")) {
			if (strict_strtoul(val, 10, &num))
				return "Invalid hashstart";
			/* hashstart is given in sectors; convert to hash blocks. */
			args->hash_start_block =
				num >> (args->hash_block_size_bits - SECTOR_SHIFT);
			/* Data area ends where the hash tree begins. */
			args->num_data_blocks = args->hash_start_block;
		} else if (!strcmp(key, "error_behavior")) {
			args->error_behavior = val;
		} else if (!strcmp(key, "salt")) {
/* Require each mandatory argument to have been seen. */
#define NEEDARG(n) \
		return "Missing argument: " #n; \
	NEEDARG(args->algorithm);
	NEEDARG(args->data_device);
	NEEDARG(args->hash_device);
	NEEDARG(args->digest);
1064 * Target parameters:
1065 * <version> The current format is version 1.
1066 * Vsn 0 is compatible with original Chromium OS releases.
1071 * <the number of data blocks>
1072 * <hash start block>
1075 * <salt> Hex string or "-" if no salt.
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
	struct verity_args args = { 0 };
	struct dm_verity *v;
	sector_t hash_position;
	/* Accept either the upstream positional format or chromeos key=value. */
	args.error_behavior = error_behavior;
	if (argc == DM_VERITY_NUM_POSITIONAL_ARGS)
		ti->error = positional_args(argc, argv, &args);
		ti->error = chromeos_args(argc, argv, &args);
	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
		ti->error = "Cannot allocate verity structure";
	v->version = args.version;
	v->data_dev = verity_get_device(ti, args.data_device, &v->data_dev) is below;
	r = verity_get_device(ti, args.data_device, &v->data_dev);
		ti->error = "Data device lookup failed";
	r = verity_get_device(ti, args.hash_device, &v->hash_dev);
		/* NOTE(review): message says "Data" for the hash device — copy-paste? */
		ti->error = "Data device lookup failed";
	v->data_dev_block_bits = args.data_block_size_bits;
	/* Block size must be at least the device's logical block size. */
	if ((1 << v->data_dev_block_bits) <
	    bdev_logical_block_size(v->data_dev->bdev)) {
		ti->error = "Invalid data device block size";
	v->hash_dev_block_bits = args.hash_block_size_bits;
	/* NOTE(review): this checks data_dev_block_bits against the hash device;
	 * presumably hash_dev_block_bits was intended — confirm against upstream. */
	if ((1 << v->data_dev_block_bits) <
	    bdev_logical_block_size(v->hash_dev->bdev)) {
		ti->error = "Invalid hash device block size";
	v->data_blocks = args.num_data_blocks;
	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
	v->hash_start = args.hash_start_block;
	v->alg_name = kstrdup(args.algorithm, GFP_KERNEL);
		ti->error = "Cannot allocate algorithm name";
	v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
	if (IS_ERR(v->tfm)) {
		ti->error = "Cannot initialize hash function";
		r = PTR_ERR(v->tfm);
	v->digest_size = crypto_shash_digestsize(v->tfm);
	/* A hash block must fit at least two digests. */
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
	/* Root digest is supplied as hex; must decode to exactly digest_size bytes. */
	if (strlen(args.digest) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, args.digest, v->digest_size)) {
		ti->error = "Invalid root digest";
	/* "-" means no salt. */
	if (strcmp(args.salt, "-")) {
		v->salt_size = strlen(args.salt) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
			ti->error = "Cannot allocate salt";
		if (strlen(args.salt) != v->salt_size * 2 ||
		    hex2bin(v->salt, args.salt, v->salt_size)) {
			ti->error = "Invalid salt";
	/* How many digests fit in one hash block (as a power of two). */
	v->hash_per_block_bits =
		fls((1 << v->hash_dev_block_bits) / v->digest_size) - 1;
	/* Grow the tree until one root block covers all data blocks. */
	while (v->hash_per_block_bits * v->levels < 64 &&
	       (unsigned long long)(v->data_blocks - 1) >>
	       (v->hash_per_block_bits * v->levels))
	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
	/* Lay out per-level starting blocks, top level first on disk. */
	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		v->hash_level_block[i] = hash_position;
		s = verity_position_at_level(v, v->data_blocks, i);
		/* Round the level's digest count up to whole hash blocks. */
		s = (s >> v->hash_per_block_bits) +
		    !!(s & ((1 << v->hash_per_block_bits) - 1));
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
	v->hash_blocks = hash_position;
	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
	/* io struct plus trailing shash desc + real digest + want digest. */
	v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
		sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
	if (!v->io_mempool) {
		ti->error = "Cannot allocate io mempool";
	v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
		BIO_MAX_PAGES * sizeof(struct bio_vec));
	if (!v->vec_mempool) {
		ti->error = "Cannot allocate vector mempool";
	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
	/* chromeos allows setting error_behavior from both the module
	 * parameters and the device args.
	 */
	v->error_behavior = verity_parse_error_behavior(args.error_behavior);
	if (v->error_behavior == -1) {
		ti->error = "Bad error_behavior supplied";
/* Registration table for the "verity" device-mapper target. */
static struct target_type verity_target = {
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.status = verity_status,
	.ioctl = verity_ioctl,
	.merge = verity_merge,
	.iterate_devices = verity_iterate_devices,
	.io_hints = verity_io_hints,
1299 static int __init dm_verity_init(void)
1303 r = dm_register_target(&verity_target);
1305 DMERR("register failed %d", r);
1310 static void __exit dm_verity_exit(void)
1312 dm_unregister_target(&verity_target);
1315 module_init(dm_verity_init);
1316 module_exit(dm_verity_exit);
1318 MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
1319 MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
1320 MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
1321 MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
1322 MODULE_LICENSE("GPL");