/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
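/*
 * uuid_mutex protects fs_uuids, the global list holding one
 * btrfs_fs_devices per filesystem UUID ever seen by a device scan.
 */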
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;
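	/*
	 * btrfs_async_submit_limit() is the async bio queue depth target;
	 * waiters are woken below once nr_async_bios drops under two
	 * thirds of it.
	 */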
loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;
	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);
	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
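		/*
		 * the bio must still hold a reference here; a zero bi_cnt
		 * would mean we are about to submit an already freed bio
		 */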
		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we submit
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();
		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}
	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
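/*
 * record a device seen by a scan: find (or create) the btrfs_fs_devices
 * for the fsid in the super block, add the device to it if it is new,
 * and refresh the recorded path if the device moved.
 */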
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
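/*
 * make an in-memory duplicate of an fs_devices list; used when sprouting
 * a new filesystem from a seed, so the original devices stay described
 * under the old fsid.
 */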
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
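/*
 * after the device tree has been read, drop any scanned devices that
 * never showed up in the metadata and remember the device with the
 * newest generation as latest_bdev.
 */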
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}
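/*
 * final device teardown happens in two deferred steps: free_device()
 * runs as an RCU callback and only schedules __free_device(), because
 * blkdev_put() may sleep while RCU callbacks run in atomic context.
 */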
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);
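		/*
		 * the shadow copy replaces the real device in the RCU
		 * protected list, so lockless readers keep seeing a fully
		 * populated (if closed) entry while the old one waits out
		 * its grace period.
		 */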
		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
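/*
 * open every scanned device of this filesystem, validating devid and
 * uuid against the super block found on disk; the newest generation
 * becomes latest_bdev.  Callers serialize on uuid_mutex.
 */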
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;
	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh);
		if (ret)
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;
	u64 total_devices;

	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);
	ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
	if (ret)
		goto error;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);
	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;
	brelse(bh);
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But
 * if we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
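/*
 * Illustrative use (editor's sketch, not from the original source):
 *
 *	u64 start, len;
 *	int ret = find_free_dev_extent(device, 1024ULL * 1024 * 1024,
 *				       &start, &len);
 *
 * ret is 0 with @start set when a 1GiB hole exists, or -ENOSPC with
 * @start and @len describing the largest hole that was found.
 */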
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}
	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
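/*
 * remove the dev extent item that covers @start and return its bytes to
 * the device's free space accounting.
 */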
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
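/*
 * find the logical offset just past the highest existing chunk of
 * @objectid, so the next chunk can be placed there; *offset is 0 when
 * the chunk tree holds none.
 */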
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
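/*
 * devids are handed out in order: look up the last dev item in the
 * chunk root and return one past its offset.
 */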
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
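/*
 * remove a device either by path or by the literal string "missing";
 * refused when doing so would leave raid1 or raid10 below the number
 * of devices the profile requires.
 */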
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_READ | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;
	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
			      struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			pr_err("btrfs: no missing device found\n");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);
	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
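/*
 * SYSTEM chunks are duplicated in the superblock's sys_chunk_array;
 * deleting one means memmoving the rest of the array down over it and
 * shrinking the recorded array size.
 */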
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
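/*
 * walk the chunk tree backwards and relocate every SYSTEM chunk.  A
 * failed (-ENOSPC) chunk is retried once, since relocating the others
 * may have freed the space it needed.
 */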
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;
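	/*
	 * a chunk's length covers all of its independent stripes;
	 * dividing by the number of distinct stripes (mirrored copies
	 * counted once) gives the bytes each stripe occupies on its
	 * device.
	 */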
	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
2500 static int chunk_soft_convert_filter(u64 chunk_type,
2501 struct btrfs_balance_args *bargs)
2503 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2506 chunk_type = chunk_to_extended(chunk_type) &
2507 BTRFS_EXTENDED_PROFILE_MASK;
2509 if (bargs->target == chunk_type)
2515 static int should_balance_chunk(struct btrfs_root *root,
2516 struct extent_buffer *leaf,
2517 struct btrfs_chunk *chunk, u64 chunk_offset)
2519 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2520 struct btrfs_balance_args *bargs = NULL;
2521 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2524 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2525 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2529 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2530 bargs = &bctl->data;
2531 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2533 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2534 bargs = &bctl->meta;
2536 /* profiles filter */
2537 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2538 chunk_profiles_filter(chunk_type, bargs)) {
2543 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2544 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2549 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2550 chunk_devid_filter(leaf, chunk, bargs)) {
2554 /* drange filter, makes sense only with devid filter */
2555 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2556 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2561 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2562 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2566 /* soft profile changing mode */
2567 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2568 chunk_soft_convert_filter(chunk_type, bargs)) {
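/*
 * The checks above are cumulative: a chunk is balanced only if it
 * passes every filter enabled in bargs->flags; any single enabled
 * filter that rejects the chunk excludes it from relocation.
 */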
2575 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2577 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2578 struct btrfs_root *chunk_root = fs_info->chunk_root;
2579 struct btrfs_root *dev_root = fs_info->dev_root;
2580 struct list_head *devices;
2581 struct btrfs_device *device;
2584 struct btrfs_chunk *chunk;
2585 struct btrfs_path *path;
2586 struct btrfs_key key;
2587 struct btrfs_key found_key;
2588 struct btrfs_trans_handle *trans;
2589 struct extent_buffer *leaf;
2592 int enospc_errors = 0;
2593 bool counting = true;
2595 /* step one, make some room on all the devices */
2596 devices = &fs_info->fs_devices->devices;
2597 list_for_each_entry(device, devices, dev_list) {
2598 old_size = device->total_bytes;
2599 size_to_free = div_factor(old_size, 1);
2600 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2601 if (!device->writeable ||
2602 device->total_bytes - device->bytes_used > size_to_free)
2605 ret = btrfs_shrink_device(device, old_size - size_to_free);
2610 trans = btrfs_start_transaction(dev_root, 0);
2611 BUG_ON(IS_ERR(trans));
2613 ret = btrfs_grow_device(trans, device, old_size);
2616 btrfs_end_transaction(trans, dev_root);
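/*
 * The shrink/grow sequence above relocates whatever sits in the last
 * megabyte (at most) of each writeable device and then restores the
 * original device size, so step two always has a little free space
 * at the end of every device to allocate new chunks from.
 */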
2619 /* step two, relocate all the chunks */
2620 path = btrfs_alloc_path();
2626 /* zero out stat counters */
2627 spin_lock(&fs_info->balance_lock);
2628 memset(&bctl->stat, 0, sizeof(bctl->stat));
2629 spin_unlock(&fs_info->balance_lock);
2631 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2632 key.offset = (u64)-1;
2633 key.type = BTRFS_CHUNK_ITEM_KEY;
2636 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2637 atomic_read(&fs_info->balance_cancel_req)) {
2642 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2647 * this shouldn't happen, it means the last relocate
2648 * failed
2651 BUG(); /* FIXME break ? */
2653 ret = btrfs_previous_item(chunk_root, path, 0,
2654 BTRFS_CHUNK_ITEM_KEY);
2660 leaf = path->nodes[0];
2661 slot = path->slots[0];
2662 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2664 if (found_key.objectid != key.objectid)
2667 /* chunk zero is special */
2668 if (found_key.offset == 0)
2671 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2674 spin_lock(&fs_info->balance_lock);
2675 bctl->stat.considered++;
2676 spin_unlock(&fs_info->balance_lock);
2679 ret = should_balance_chunk(chunk_root, leaf, chunk,
2681 btrfs_release_path(path);
2686 spin_lock(&fs_info->balance_lock);
2687 bctl->stat.expected++;
2688 spin_unlock(&fs_info->balance_lock);
2692 ret = btrfs_relocate_chunk(chunk_root,
2693 chunk_root->root_key.objectid,
2696 if (ret && ret != -ENOSPC)
2698 if (ret == -ENOSPC) {
2701 spin_lock(&fs_info->balance_lock);
2702 bctl->stat.completed++;
2703 spin_unlock(&fs_info->balance_lock);
2706 key.offset = found_key.offset - 1;
2710 btrfs_release_path(path);
2715 btrfs_free_path(path);
2716 if (enospc_errors) {
2717 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2727 * alloc_profile_is_valid - see if a given profile is valid and reduced
2728 * @flags: profile to validate
2729 * @extended: if true @flags is treated as an extended profile
2731 static int alloc_profile_is_valid(u64 flags, int extended)
2733 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2734 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2736 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2738 /* 1) check that all other bits are zeroed */
2742 /* 2) see if profile is reduced */
2744 return !extended; /* "0" is valid for usual profiles */
2746 /* true if exactly one bit set */
2747 return (flags & (flags - 1)) == 0;
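/*
 * Examples: 0 is valid for usual profiles (it means "single") but not
 * for extended ones; RAID1 alone is reduced and valid; RAID0|RAID1
 * sets two profile bits and fails the single-bit test above.
 */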
2750 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2752 /* cancel requested || normal exit path */
2753 return atomic_read(&fs_info->balance_cancel_req) ||
2754 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2755 atomic_read(&fs_info->balance_cancel_req) == 0);
2758 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2762 unset_balance_control(fs_info);
2763 ret = del_balance_item(fs_info->tree_root);
2767 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2768 struct btrfs_ioctl_balance_args *bargs);
2771 * Should be called with both balance and volume mutexes held
2773 int btrfs_balance(struct btrfs_balance_control *bctl,
2774 struct btrfs_ioctl_balance_args *bargs)
2776 struct btrfs_fs_info *fs_info = bctl->fs_info;
2781 if (btrfs_fs_closing(fs_info) ||
2782 atomic_read(&fs_info->balance_pause_req) ||
2783 atomic_read(&fs_info->balance_cancel_req)) {
2788 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2789 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2793 * In case of mixed groups both data and meta should be picked,
2794 * and identical options should be given for both of them.
2796 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2797 if (mixed && (bctl->flags & allowed)) {
2798 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2799 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2800 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2801 printk(KERN_ERR "btrfs: with mixed groups data and "
2802 "metadata balance options must be the same\n");
2808 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2809 if (fs_info->fs_devices->num_devices == 1)
2810 allowed |= BTRFS_BLOCK_GROUP_DUP;
2811 else if (fs_info->fs_devices->num_devices < 4)
2812 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2814 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2815 BTRFS_BLOCK_GROUP_RAID10);
2817 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2818 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2819 (bctl->data.target & ~allowed))) {
2820 printk(KERN_ERR "btrfs: unable to start balance with target "
2821 "data profile %llu\n",
2822 (unsigned long long)bctl->data.target);
2826 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2827 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2828 (bctl->meta.target & ~allowed))) {
2829 printk(KERN_ERR "btrfs: unable to start balance with target "
2830 "metadata profile %llu\n",
2831 (unsigned long long)bctl->meta.target);
2835 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2836 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2837 (bctl->sys.target & ~allowed))) {
2838 printk(KERN_ERR "btrfs: unable to start balance with target "
2839 "system profile %llu\n",
2840 (unsigned long long)bctl->sys.target);
2845 /* allow dup'ed data chunks only in mixed mode */
2846 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2847 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2848 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2853 /* allow reducing meta or sys integrity only if force is set */
2854 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2855 BTRFS_BLOCK_GROUP_RAID10;
2856 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2857 (fs_info->avail_system_alloc_bits & allowed) &&
2858 !(bctl->sys.target & allowed)) ||
2859 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2860 (fs_info->avail_metadata_alloc_bits & allowed) &&
2861 !(bctl->meta.target & allowed))) {
2862 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2863 printk(KERN_INFO "btrfs: force reducing metadata "
2866 printk(KERN_ERR "btrfs: balance will reduce metadata "
2867 "integrity, use force if you want this\n");
2873 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2874 int num_tolerated_disk_barrier_failures;
2875 u64 target = bctl->sys.target;
2877 num_tolerated_disk_barrier_failures =
2878 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2879 if (num_tolerated_disk_barrier_failures > 0 &&
2881 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
2882 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
2883 num_tolerated_disk_barrier_failures = 0;
2884 else if (num_tolerated_disk_barrier_failures > 1 &&
2886 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
2887 num_tolerated_disk_barrier_failures = 1;
2889 fs_info->num_tolerated_disk_barrier_failures =
2890 num_tolerated_disk_barrier_failures;
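/*
 * In other words: converting the system chunks to single, DUP or
 * RAID0 leaves nothing that survives a lost device's barriers, while
 * RAID1/RAID10 system targets still tolerate one failed device.
 */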
2893 ret = insert_balance_item(fs_info->tree_root, bctl);
2894 if (ret && ret != -EEXIST)
2897 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2898 BUG_ON(ret == -EEXIST);
2899 set_balance_control(bctl);
2901 BUG_ON(ret != -EEXIST);
2902 spin_lock(&fs_info->balance_lock);
2903 update_balance_args(bctl);
2904 spin_unlock(&fs_info->balance_lock);
2907 atomic_inc(&fs_info->balance_running);
2908 mutex_unlock(&fs_info->balance_mutex);
2910 ret = __btrfs_balance(fs_info);
2912 mutex_lock(&fs_info->balance_mutex);
2913 atomic_dec(&fs_info->balance_running);
2916 memset(bargs, 0, sizeof(*bargs));
2917 update_ioctl_balance_args(fs_info, 0, bargs);
2920 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2921 balance_need_close(fs_info)) {
2922 __cancel_balance(fs_info);
2925 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2926 fs_info->num_tolerated_disk_barrier_failures =
2927 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2930 wake_up(&fs_info->balance_wait_q);
2934 if (bctl->flags & BTRFS_BALANCE_RESUME)
2935 __cancel_balance(fs_info);
2941 static int balance_kthread(void *data)
2943 struct btrfs_fs_info *fs_info = data;
2946 mutex_lock(&fs_info->volume_mutex);
2947 mutex_lock(&fs_info->balance_mutex);
2949 if (fs_info->balance_ctl) {
2950 printk(KERN_INFO "btrfs: continuing balance\n");
2951 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2954 mutex_unlock(&fs_info->balance_mutex);
2955 mutex_unlock(&fs_info->volume_mutex);
2960 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2962 struct task_struct *tsk;
2964 spin_lock(&fs_info->balance_lock);
2965 if (!fs_info->balance_ctl) {
2966 spin_unlock(&fs_info->balance_lock);
2969 spin_unlock(&fs_info->balance_lock);
2971 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2972 printk(KERN_INFO "btrfs: force skipping balance\n");
2976 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2978 return PTR_ERR(tsk);
2983 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2985 struct btrfs_balance_control *bctl;
2986 struct btrfs_balance_item *item;
2987 struct btrfs_disk_balance_args disk_bargs;
2988 struct btrfs_path *path;
2989 struct extent_buffer *leaf;
2990 struct btrfs_key key;
2993 path = btrfs_alloc_path();
2997 key.objectid = BTRFS_BALANCE_OBJECTID;
2998 key.type = BTRFS_BALANCE_ITEM_KEY;
3001 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3004 if (ret > 0) { /* ret = -ENOENT; */
3009 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3015 leaf = path->nodes[0];
3016 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3018 bctl->fs_info = fs_info;
3019 bctl->flags = btrfs_balance_flags(leaf, item);
3020 bctl->flags |= BTRFS_BALANCE_RESUME;
3022 btrfs_balance_data(leaf, item, &disk_bargs);
3023 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3024 btrfs_balance_meta(leaf, item, &disk_bargs);
3025 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3026 btrfs_balance_sys(leaf, item, &disk_bargs);
3027 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3029 mutex_lock(&fs_info->volume_mutex);
3030 mutex_lock(&fs_info->balance_mutex);
3032 set_balance_control(bctl);
3034 mutex_unlock(&fs_info->balance_mutex);
3035 mutex_unlock(&fs_info->volume_mutex);
3037 btrfs_free_path(path);
3041 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3045 mutex_lock(&fs_info->balance_mutex);
3046 if (!fs_info->balance_ctl) {
3047 mutex_unlock(&fs_info->balance_mutex);
3051 if (atomic_read(&fs_info->balance_running)) {
3052 atomic_inc(&fs_info->balance_pause_req);
3053 mutex_unlock(&fs_info->balance_mutex);
3055 wait_event(fs_info->balance_wait_q,
3056 atomic_read(&fs_info->balance_running) == 0);
3058 mutex_lock(&fs_info->balance_mutex);
3059 /* we are good with balance_ctl ripped off from under us */
3060 BUG_ON(atomic_read(&fs_info->balance_running));
3061 atomic_dec(&fs_info->balance_pause_req);
3066 mutex_unlock(&fs_info->balance_mutex);
3070 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3072 mutex_lock(&fs_info->balance_mutex);
3073 if (!fs_info->balance_ctl) {
3074 mutex_unlock(&fs_info->balance_mutex);
3078 atomic_inc(&fs_info->balance_cancel_req);
3080 * if we are running, just wait and return; the balance item is
3081 * deleted in btrfs_balance in this case
3083 if (atomic_read(&fs_info->balance_running)) {
3084 mutex_unlock(&fs_info->balance_mutex);
3085 wait_event(fs_info->balance_wait_q,
3086 atomic_read(&fs_info->balance_running) == 0);
3087 mutex_lock(&fs_info->balance_mutex);
3089 /* __cancel_balance needs volume_mutex */
3090 mutex_unlock(&fs_info->balance_mutex);
3091 mutex_lock(&fs_info->volume_mutex);
3092 mutex_lock(&fs_info->balance_mutex);
3094 if (fs_info->balance_ctl)
3095 __cancel_balance(fs_info);
3097 mutex_unlock(&fs_info->volume_mutex);
3100 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3101 atomic_dec(&fs_info->balance_cancel_req);
3102 mutex_unlock(&fs_info->balance_mutex);
3107 * shrinking a device means finding all of the device extents past
3108 * the new size, and then following the back refs to the chunks.
3109 * The chunk relocation code actually frees the device extent
3111 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3113 struct btrfs_trans_handle *trans;
3114 struct btrfs_root *root = device->dev_root;
3115 struct btrfs_dev_extent *dev_extent = NULL;
3116 struct btrfs_path *path;
3124 bool retried = false;
3125 struct extent_buffer *l;
3126 struct btrfs_key key;
3127 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3128 u64 old_total = btrfs_super_total_bytes(super_copy);
3129 u64 old_size = device->total_bytes;
3130 u64 diff = device->total_bytes - new_size;
3132 path = btrfs_alloc_path();
3140 device->total_bytes = new_size;
3141 if (device->writeable) {
3142 device->fs_devices->total_rw_bytes -= diff;
3143 spin_lock(&root->fs_info->free_chunk_lock);
3144 root->fs_info->free_chunk_space -= diff;
3145 spin_unlock(&root->fs_info->free_chunk_lock);
3147 unlock_chunks(root);
3150 key.objectid = device->devid;
3151 key.offset = (u64)-1;
3152 key.type = BTRFS_DEV_EXTENT_KEY;
3155 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3159 ret = btrfs_previous_item(root, path, 0, key.type);
3164 btrfs_release_path(path);
3169 slot = path->slots[0];
3170 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3172 if (key.objectid != device->devid) {
3173 btrfs_release_path(path);
3177 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3178 length = btrfs_dev_extent_length(l, dev_extent);
3180 if (key.offset + length <= new_size) {
3181 btrfs_release_path(path);
3185 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3186 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3187 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3188 btrfs_release_path(path);
3190 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3192 if (ret && ret != -ENOSPC)
3196 } while (key.offset-- > 0);
3198 if (failed && !retried) {
3202 } else if (failed && retried) {
3206 device->total_bytes = old_size;
3207 if (device->writeable)
3208 device->fs_devices->total_rw_bytes += diff;
3209 spin_lock(&root->fs_info->free_chunk_lock);
3210 root->fs_info->free_chunk_space += diff;
3211 spin_unlock(&root->fs_info->free_chunk_lock);
3212 unlock_chunks(root);
3216 /* Shrinking succeeded, else we would be at "done". */
3217 trans = btrfs_start_transaction(root, 0);
3218 if (IS_ERR(trans)) {
3219 ret = PTR_ERR(trans);
3225 device->disk_total_bytes = new_size;
3226 /* Now btrfs_update_device() will change the on-disk size. */
3227 ret = btrfs_update_device(trans, device);
3229 unlock_chunks(root);
3230 btrfs_end_transaction(trans, root);
3233 WARN_ON(diff > old_total);
3234 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3235 unlock_chunks(root);
3236 btrfs_end_transaction(trans, root);
3238 btrfs_free_path(path);
3242 static int btrfs_add_system_chunk(struct btrfs_root *root,
3243 struct btrfs_key *key,
3244 struct btrfs_chunk *chunk, int item_size)
3246 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3247 struct btrfs_disk_key disk_key;
3251 array_size = btrfs_super_sys_array_size(super_copy);
3252 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3255 ptr = super_copy->sys_chunk_array + array_size;
3256 btrfs_cpu_key_to_disk(&disk_key, key);
3257 memcpy(ptr, &disk_key, sizeof(disk_key));
3258 ptr += sizeof(disk_key);
3259 memcpy(ptr, chunk, item_size);
3260 item_size += sizeof(disk_key);
3261 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
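/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk + stripes) pairs; the code above
 * appends one pair and grows the recorded array size by the key plus
 * the chunk item size.
 */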
3266 * sort the devices in descending order by max_avail, total_avail
3268 static int btrfs_cmp_device_info(const void *a, const void *b)
3270 const struct btrfs_device_info *di_a = a;
3271 const struct btrfs_device_info *di_b = b;
3273 if (di_a->max_avail > di_b->max_avail)
3275 if (di_a->max_avail < di_b->max_avail)
3277 if (di_a->total_avail > di_b->total_avail)
3279 if (di_a->total_avail < di_b->total_avail)
3284 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3285 struct btrfs_root *extent_root,
3286 struct map_lookup **map_ret,
3287 u64 *num_bytes_out, u64 *stripe_size_out,
3288 u64 start, u64 type)
3290 struct btrfs_fs_info *info = extent_root->fs_info;
3291 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3292 struct list_head *cur;
3293 struct map_lookup *map = NULL;
3294 struct extent_map_tree *em_tree;
3295 struct extent_map *em;
3296 struct btrfs_device_info *devices_info = NULL;
3298 int num_stripes; /* total number of stripes to allocate */
3299 int sub_stripes; /* sub_stripes info for map */
3300 int dev_stripes; /* stripes per dev */
3301 int devs_max; /* max devs to use */
3302 int devs_min; /* min devs needed */
3303 int devs_increment; /* ndevs has to be a multiple of this */
3304 int ncopies; /* how many copies of the data there are */
3306 u64 max_stripe_size;
3314 BUG_ON(!alloc_profile_is_valid(type, 0));
3316 if (list_empty(&fs_devices->alloc_list))
3323 devs_max = 0; /* 0 == as many as possible */
3327 * define the properties of each RAID type.
3328 * FIXME: move this to a global table and use it in all RAID code
3331 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3335 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3337 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3342 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3351 if (type & BTRFS_BLOCK_GROUP_DATA) {
3352 max_stripe_size = 1024 * 1024 * 1024;
3353 max_chunk_size = 10 * max_stripe_size;
3354 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3355 /* for larger filesystems, use larger metadata chunks */
3356 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3357 max_stripe_size = 1024 * 1024 * 1024;
3359 max_stripe_size = 256 * 1024 * 1024;
3360 max_chunk_size = max_stripe_size;
3361 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3362 max_stripe_size = 32 * 1024 * 1024;
3363 max_chunk_size = 2 * max_stripe_size;
3365 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3370 /* we don't want a chunk larger than 10% of writeable space */
3371 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3374 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3379 cur = fs_devices->alloc_list.next;
3382 * in the first pass through the devices list, we gather information
3383 * about the available holes on each device.
3386 while (cur != &fs_devices->alloc_list) {
3387 struct btrfs_device *device;
3391 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3395 if (!device->writeable) {
3397 "btrfs: read-only device in alloc_list\n");
3401 if (!device->in_fs_metadata)
3404 if (device->total_bytes > device->bytes_used)
3405 total_avail = device->total_bytes - device->bytes_used;
3409 /* If there is no space on this device, skip it. */
3410 if (total_avail == 0)
3413 ret = find_free_dev_extent(device,
3414 max_stripe_size * dev_stripes,
3415 &dev_offset, &max_avail);
3416 if (ret && ret != -ENOSPC)
3420 max_avail = max_stripe_size * dev_stripes;
3422 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3425 devices_info[ndevs].dev_offset = dev_offset;
3426 devices_info[ndevs].max_avail = max_avail;
3427 devices_info[ndevs].total_avail = total_avail;
3428 devices_info[ndevs].dev = device;
3433 * now sort the devices by hole size / available space
3435 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3436 btrfs_cmp_device_info, NULL);
3438 /* round down to number of usable stripes */
3439 ndevs -= ndevs % devs_increment;
3441 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3446 if (devs_max && ndevs > devs_max)
3449 * the primary goal is to maximize the number of stripes, so use as many
3450 * devices as possible, even if the stripes are not maximum sized.
3452 stripe_size = devices_info[ndevs-1].max_avail;
3453 num_stripes = ndevs * dev_stripes;
3455 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3456 stripe_size = max_chunk_size * ncopies;
3457 do_div(stripe_size, ndevs);
3460 do_div(stripe_size, dev_stripes);
3462 /* align to BTRFS_STRIPE_LEN */
3463 do_div(stripe_size, BTRFS_STRIPE_LEN);
3464 stripe_size *= BTRFS_STRIPE_LEN;
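/*
 * Worked example: for a RAID1 data chunk (dev_stripes = 1,
 * ncopies = 2) where the most constrained chosen device has
 * 1GiB + 10KiB free, the cap against max_chunk_size does not kick in
 * and rounding down to a multiple of BTRFS_STRIPE_LEN (64KiB) gives
 * stripe_size = 1GiB.
 */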
3466 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3471 map->num_stripes = num_stripes;
3473 for (i = 0; i < ndevs; ++i) {
3474 for (j = 0; j < dev_stripes; ++j) {
3475 int s = i * dev_stripes + j;
3476 map->stripes[s].dev = devices_info[i].dev;
3477 map->stripes[s].physical = devices_info[i].dev_offset +
3481 map->sector_size = extent_root->sectorsize;
3482 map->stripe_len = BTRFS_STRIPE_LEN;
3483 map->io_align = BTRFS_STRIPE_LEN;
3484 map->io_width = BTRFS_STRIPE_LEN;
3486 map->sub_stripes = sub_stripes;
3489 num_bytes = stripe_size * (num_stripes / ncopies);
3491 *stripe_size_out = stripe_size;
3492 *num_bytes_out = num_bytes;
3494 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3496 em = alloc_extent_map();
3501 em->bdev = (struct block_device *)map;
3503 em->len = num_bytes;
3504 em->block_start = 0;
3505 em->block_len = em->len;
3507 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3508 write_lock(&em_tree->lock);
3509 ret = add_extent_mapping(em_tree, em);
3510 write_unlock(&em_tree->lock);
3511 free_extent_map(em);
3515 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3516 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3521 for (i = 0; i < map->num_stripes; ++i) {
3522 struct btrfs_device *device;
3525 device = map->stripes[i].dev;
3526 dev_offset = map->stripes[i].physical;
3528 ret = btrfs_alloc_dev_extent(trans, device,
3529 info->chunk_root->root_key.objectid,
3530 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3531 start, dev_offset, stripe_size);
3533 btrfs_abort_transaction(trans, extent_root, ret);
3538 kfree(devices_info);
3543 kfree(devices_info);
3547 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3548 struct btrfs_root *extent_root,
3549 struct map_lookup *map, u64 chunk_offset,
3550 u64 chunk_size, u64 stripe_size)
3553 struct btrfs_key key;
3554 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3555 struct btrfs_device *device;
3556 struct btrfs_chunk *chunk;
3557 struct btrfs_stripe *stripe;
3558 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3562 chunk = kzalloc(item_size, GFP_NOFS);
3567 while (index < map->num_stripes) {
3568 device = map->stripes[index].dev;
3569 device->bytes_used += stripe_size;
3570 ret = btrfs_update_device(trans, device);
3576 spin_lock(&extent_root->fs_info->free_chunk_lock);
3577 extent_root->fs_info->free_chunk_space -= (stripe_size *
3579 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3582 stripe = &chunk->stripe;
3583 while (index < map->num_stripes) {
3584 device = map->stripes[index].dev;
3585 dev_offset = map->stripes[index].physical;
3587 btrfs_set_stack_stripe_devid(stripe, device->devid);
3588 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3589 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3594 btrfs_set_stack_chunk_length(chunk, chunk_size);
3595 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3596 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3597 btrfs_set_stack_chunk_type(chunk, map->type);
3598 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3599 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3600 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3601 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3602 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3604 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3605 key.type = BTRFS_CHUNK_ITEM_KEY;
3606 key.offset = chunk_offset;
3608 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3610 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3612 * TODO: Cleanup of inserted chunk root in case of
3613 * failure.
3615 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3625 * Chunk allocation falls into two parts. The first part does the work
3626 * that makes the newly allocated chunk usable, but does not do any
3627 * operation that modifies the chunk tree. The second part does the work
3628 * that requires modifying the chunk tree. This division is important for
3629 * the bootstrap process of adding storage to a seed btrfs.
3631 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3632 struct btrfs_root *extent_root, u64 type)
3637 struct map_lookup *map;
3638 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3641 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3646 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3647 &stripe_size, chunk_offset, type);
3651 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3652 chunk_size, stripe_size);
3658 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3659 struct btrfs_root *root,
3660 struct btrfs_device *device)
3663 u64 sys_chunk_offset;
3667 u64 sys_stripe_size;
3669 struct map_lookup *map;
3670 struct map_lookup *sys_map;
3671 struct btrfs_fs_info *fs_info = root->fs_info;
3672 struct btrfs_root *extent_root = fs_info->extent_root;
3675 ret = find_next_chunk(fs_info->chunk_root,
3676 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3680 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3681 fs_info->avail_metadata_alloc_bits;
3682 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3684 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3685 &stripe_size, chunk_offset, alloc_profile);
3689 sys_chunk_offset = chunk_offset + chunk_size;
3691 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3692 fs_info->avail_system_alloc_bits;
3693 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3695 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3696 &sys_chunk_size, &sys_stripe_size,
3697 sys_chunk_offset, alloc_profile);
3699 btrfs_abort_transaction(trans, root, ret);
3703 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3705 btrfs_abort_transaction(trans, root, ret);
3710 * Modifying the chunk tree requires allocating new blocks from both
3711 * the system block group and the metadata block group, so we can
3712 * only perform operations that modify the chunk tree after both
3713 * block groups have been created.
3715 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3716 chunk_size, stripe_size);
3718 btrfs_abort_transaction(trans, root, ret);
3722 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3723 sys_chunk_offset, sys_chunk_size,
3726 btrfs_abort_transaction(trans, root, ret);
3733 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3735 struct extent_map *em;
3736 struct map_lookup *map;
3737 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3741 read_lock(&map_tree->map_tree.lock);
3742 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3743 read_unlock(&map_tree->map_tree.lock);
3747 if (btrfs_test_opt(root, DEGRADED)) {
3748 free_extent_map(em);
3752 map = (struct map_lookup *)em->bdev;
3753 for (i = 0; i < map->num_stripes; i++) {
3754 if (!map->stripes[i].dev->writeable) {
3759 free_extent_map(em);
3763 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3765 extent_map_tree_init(&tree->map_tree);
3768 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3770 struct extent_map *em;
3773 write_lock(&tree->map_tree.lock);
3774 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3776 remove_extent_mapping(&tree->map_tree, em);
3777 write_unlock(&tree->map_tree.lock);
3782 free_extent_map(em);
3783 /* once for the tree */
3784 free_extent_map(em);
3788 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3790 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3791 struct extent_map *em;
3792 struct map_lookup *map;
3793 struct extent_map_tree *em_tree = &map_tree->map_tree;
3796 read_lock(&em_tree->lock);
3797 em = lookup_extent_mapping(em_tree, logical, len);
3798 read_unlock(&em_tree->lock);
3801 BUG_ON(em->start > logical || em->start + em->len < logical);
3802 map = (struct map_lookup *)em->bdev;
3803 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3804 ret = map->num_stripes;
3805 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3806 ret = map->sub_stripes;
3809 free_extent_map(em);
3813 static int find_live_mirror(struct map_lookup *map, int first, int num,
3817 if (map->stripes[optimal].dev->bdev)
3819 for (i = first; i < first + num; i++) {
3820 if (map->stripes[i].dev->bdev)
3823 /* we couldn't find one that doesn't fail. Just return something
3824 * and the io error handling code will clean up eventually
3829 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3830 u64 logical, u64 *length,
3831 struct btrfs_bio **bbio_ret,
3834 struct extent_map *em;
3835 struct map_lookup *map;
3836 struct extent_map_tree *em_tree = &map_tree->map_tree;
3839 u64 stripe_end_offset;
3848 struct btrfs_bio *bbio = NULL;
3850 read_lock(&em_tree->lock);
3851 em = lookup_extent_mapping(em_tree, logical, *length);
3852 read_unlock(&em_tree->lock);
3855 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
3856 (unsigned long long)logical,
3857 (unsigned long long)*length);
3861 BUG_ON(em->start > logical || em->start + em->len < logical);
3862 map = (struct map_lookup *)em->bdev;
3863 offset = logical - em->start;
3865 if (mirror_num > map->num_stripes)
3870 * stripe_nr counts the total number of stripes we have to stride
3871 * to get to this block
3873 do_div(stripe_nr, map->stripe_len);
3875 stripe_offset = stripe_nr * map->stripe_len;
3876 BUG_ON(offset < stripe_offset);
3878 /* stripe_offset is the offset of this block in its stripe */
3879 stripe_offset = offset - stripe_offset;
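/*
 * Example: with a 64KiB stripe_len, a 300KiB offset into the chunk
 * yields stripe_nr = 4 and stripe_offset = 300KiB - 4 * 64KiB = 44KiB.
 */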
3881 if (rw & REQ_DISCARD)
3882 *length = min_t(u64, em->len - offset, *length);
3883 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3884 /* we limit the length of each bio to what fits in a stripe */
3885 *length = min_t(u64, em->len - offset,
3886 map->stripe_len - stripe_offset);
3888 *length = em->len - offset;
3896 stripe_nr_orig = stripe_nr;
3897 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3898 (~(map->stripe_len - 1));
3899 do_div(stripe_nr_end, map->stripe_len);
3900 stripe_end_offset = stripe_nr_end * map->stripe_len -
3902 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3903 if (rw & REQ_DISCARD)
3904 num_stripes = min_t(u64, map->num_stripes,
3905 stripe_nr_end - stripe_nr_orig);
3906 stripe_index = do_div(stripe_nr, map->num_stripes);
3907 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3908 if (rw & (REQ_WRITE | REQ_DISCARD))
3909 num_stripes = map->num_stripes;
3910 else if (mirror_num)
3911 stripe_index = mirror_num - 1;
3913 stripe_index = find_live_mirror(map, 0,
3915 current->pid % map->num_stripes);
3916 mirror_num = stripe_index + 1;
3919 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3920 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3921 num_stripes = map->num_stripes;
3922 } else if (mirror_num) {
3923 stripe_index = mirror_num - 1;
3928 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3929 int factor = map->num_stripes / map->sub_stripes;
3931 stripe_index = do_div(stripe_nr, factor);
3932 stripe_index *= map->sub_stripes;
3935 num_stripes = map->sub_stripes;
3936 else if (rw & REQ_DISCARD)
3937 num_stripes = min_t(u64, map->sub_stripes *
3938 (stripe_nr_end - stripe_nr_orig),
3940 else if (mirror_num)
3941 stripe_index += mirror_num - 1;
3943 int old_stripe_index = stripe_index;
3944 stripe_index = find_live_mirror(map, stripe_index,
3945 map->sub_stripes, stripe_index +
3946 current->pid % map->sub_stripes);
3947 mirror_num = stripe_index - old_stripe_index + 1;
3951 * after this do_div call, stripe_nr is the number of stripes
3952 * on this device we have to walk to find the data, and
3953 * stripe_index is the number of our device in the stripe array
3955 stripe_index = do_div(stripe_nr, map->num_stripes);
3956 mirror_num = stripe_index + 1;
3958 BUG_ON(stripe_index >= map->num_stripes);
3960 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3965 atomic_set(&bbio->error, 0);
3967 if (rw & REQ_DISCARD) {
3969 int sub_stripes = 0;
3970 u64 stripes_per_dev = 0;
3971 u32 remaining_stripes = 0;
3972 u32 last_stripe = 0;
3975 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3976 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3979 sub_stripes = map->sub_stripes;
3981 factor = map->num_stripes / sub_stripes;
3982 stripes_per_dev = div_u64_rem(stripe_nr_end -
3985 &remaining_stripes);
3986 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3987 last_stripe *= sub_stripes;
3990 for (i = 0; i < num_stripes; i++) {
3991 bbio->stripes[i].physical =
3992 map->stripes[stripe_index].physical +
3993 stripe_offset + stripe_nr * map->stripe_len;
3994 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3996 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3997 BTRFS_BLOCK_GROUP_RAID10)) {
3998 bbio->stripes[i].length = stripes_per_dev *
4001 if (i / sub_stripes < remaining_stripes)
4002 bbio->stripes[i].length +=
4006 * Special for the first stripe and
4007 * the last stripe:
4008 *
4009 * |-------|...|-------|
4010 *     |----------|
4011 *    off     end_off
4013 if (i < sub_stripes)
4014 bbio->stripes[i].length -=
4017 if (stripe_index >= last_stripe &&
4018 stripe_index <= (last_stripe +
4020 bbio->stripes[i].length -=
4023 if (i == sub_stripes - 1)
4026 bbio->stripes[i].length = *length;
4029 if (stripe_index == map->num_stripes) {
4030 /* This could only happen for RAID0/10 */
4036 for (i = 0; i < num_stripes; i++) {
4037 bbio->stripes[i].physical =
4038 map->stripes[stripe_index].physical +
4040 stripe_nr * map->stripe_len;
4041 bbio->stripes[i].dev =
4042 map->stripes[stripe_index].dev;
4047 if (rw & REQ_WRITE) {
4048 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4049 BTRFS_BLOCK_GROUP_RAID10 |
4050 BTRFS_BLOCK_GROUP_DUP)) {
4056 bbio->num_stripes = num_stripes;
4057 bbio->max_errors = max_errors;
4058 bbio->mirror_num = mirror_num;
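/*
 * max_errors is raised to 1 above for writes to mirrored profiles
 * (RAID1, RAID10, DUP), so btrfs_end_bio() fails the original bio
 * only when more stripes error out than the redundancy can absorb.
 */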
4060 free_extent_map(em);
4064 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
4065 u64 logical, u64 *length,
4066 struct btrfs_bio **bbio_ret, int mirror_num)
4068 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
4072 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4073 u64 chunk_start, u64 physical, u64 devid,
4074 u64 **logical, int *naddrs, int *stripe_len)
4076 struct extent_map_tree *em_tree = &map_tree->map_tree;
4077 struct extent_map *em;
4078 struct map_lookup *map;
4085 read_lock(&em_tree->lock);
4086 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4087 read_unlock(&em_tree->lock);
4089 BUG_ON(!em || em->start != chunk_start);
4090 map = (struct map_lookup *)em->bdev;
4093 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4094 do_div(length, map->num_stripes / map->sub_stripes);
4095 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4096 do_div(length, map->num_stripes);
4098 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4099 BUG_ON(!buf); /* -ENOMEM */
4101 for (i = 0; i < map->num_stripes; i++) {
4102 if (devid && map->stripes[i].dev->devid != devid)
4104 if (map->stripes[i].physical > physical ||
4105 map->stripes[i].physical + length <= physical)
4108 stripe_nr = physical - map->stripes[i].physical;
4109 do_div(stripe_nr, map->stripe_len);
4111 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4112 stripe_nr = stripe_nr * map->num_stripes + i;
4113 do_div(stripe_nr, map->sub_stripes);
4114 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4115 stripe_nr = stripe_nr * map->num_stripes + i;
4117 bytenr = chunk_start + stripe_nr * map->stripe_len;
4118 WARN_ON(nr >= map->num_stripes);
4119 for (j = 0; j < nr; j++) {
4120 if (buf[j] == bytenr)
4124 WARN_ON(nr >= map->num_stripes);
4131 *stripe_len = map->stripe_len;
4133 free_extent_map(em);
4137 static void *merge_stripe_index_into_bio_private(void *bi_private,
4138 unsigned int stripe_index)
4141 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4142 * at most 3, so it fits in the two low bits of the aligned pointer.
4143 * The alternative solution (instead of stealing bits from the
4144 * pointer) would be to allocate an intermediate structure
4145 * that contains the old private pointer plus the stripe_index.
4147 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4148 BUG_ON(stripe_index > 3);
4149 return (void *)(((uintptr_t)bi_private) | stripe_index);
4152 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4154 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4157 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4159 return (unsigned int)((uintptr_t)bi_private) & 3;
4162 static void btrfs_end_bio(struct bio *bio, int err)
4164 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4165 int is_orig_bio = 0;
4168 atomic_inc(&bbio->error);
4169 if (err == -EIO || err == -EREMOTEIO) {
4170 unsigned int stripe_index =
4171 extract_stripe_index_from_bio_private(
4173 struct btrfs_device *dev;
4175 BUG_ON(stripe_index >= bbio->num_stripes);
4176 dev = bbio->stripes[stripe_index].dev;
4178 if (bio->bi_rw & WRITE)
4179 btrfs_dev_stat_inc(dev,
4180 BTRFS_DEV_STAT_WRITE_ERRS);
4182 btrfs_dev_stat_inc(dev,
4183 BTRFS_DEV_STAT_READ_ERRS);
4184 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4185 btrfs_dev_stat_inc(dev,
4186 BTRFS_DEV_STAT_FLUSH_ERRS);
4187 btrfs_dev_stat_print_on_error(dev);
4192 if (bio == bbio->orig_bio)
4195 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4198 bio = bbio->orig_bio;
4200 bio->bi_private = bbio->private;
4201 bio->bi_end_io = bbio->end_io;
4202 bio->bi_bdev = (struct block_device *)
4203 (unsigned long)bbio->mirror_num;
4204 /* only send an error to the higher layers if it is
4205 * beyond the tolerance of the multi-bio
4207 if (atomic_read(&bbio->error) > bbio->max_errors) {
4211 * this bio is actually up to date, we didn't
4212 * go over the max number of errors
4214 set_bit(BIO_UPTODATE, &bio->bi_flags);
4219 bio_endio(bio, err);
4220 } else if (!is_orig_bio) {
4225 struct async_sched {
4228 struct btrfs_fs_info *info;
4229 struct btrfs_work work;
4233 * see run_scheduled_bios for a description of why bios are collected for
4234 * async submission.
4236 * This will add one bio to the pending list for a device and make sure
4237 * the work struct is scheduled.
4239 static noinline void schedule_bio(struct btrfs_root *root,
4240 struct btrfs_device *device,
4241 int rw, struct bio *bio)
4243 int should_queue = 1;
4244 struct btrfs_pending_bios *pending_bios;
4246 /* don't bother with additional async steps for reads, right now */
4247 if (!(rw & REQ_WRITE)) {
4249 btrfsic_submit_bio(rw, bio);
4255 * nr_async_bios allows us to reliably return congestion to the
4256 * higher layers. Otherwise, the async bio makes it appear we have
4257 * made progress against dirty pages when we've really just put it
4258 * on a queue for later
4260 atomic_inc(&root->fs_info->nr_async_bios);
4261 WARN_ON(bio->bi_next);
4262 bio->bi_next = NULL;
4265 spin_lock(&device->io_lock);
4266 if (bio->bi_rw & REQ_SYNC)
4267 pending_bios = &device->pending_sync_bios;
4269 pending_bios = &device->pending_bios;
4271 if (pending_bios->tail)
4272 pending_bios->tail->bi_next = bio;
4274 pending_bios->tail = bio;
4275 if (!pending_bios->head)
4276 pending_bios->head = bio;
4277 if (device->running_pending)
4280 spin_unlock(&device->io_lock);
4283 btrfs_queue_worker(&root->fs_info->submit_workers,
4287 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4290 struct bio_vec *prev;
4291 struct request_queue *q = bdev_get_queue(bdev);
4292 unsigned short max_sectors = queue_max_sectors(q);
4293 struct bvec_merge_data bvm = {
4295 .bi_sector = sector,
4296 .bi_rw = bio->bi_rw,
4299 if (bio->bi_vcnt == 0) {
4304 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4305 if ((bio->bi_size >> 9) > max_sectors)
4308 if (!q->merge_bvec_fn)
4311 bvm.bi_size = bio->bi_size - prev->bv_len;
4312 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4317 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4318 struct bio *bio, u64 physical, int dev_nr,
4321 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4323 bio->bi_private = bbio;
4324 bio->bi_private = merge_stripe_index_into_bio_private(
4325 bio->bi_private, (unsigned int)dev_nr);
4326 bio->bi_end_io = btrfs_end_bio;
4327 bio->bi_sector = physical >> 9;
4330 struct rcu_string *name;
4333 name = rcu_dereference(dev->name);
4334 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4335 "(%s id %llu), size=%u\n", rw,
4336 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4337 name->str, dev->devid, bio->bi_size);
4341 bio->bi_bdev = dev->bdev;
4343 schedule_bio(root, dev, rw, bio);
4345 btrfsic_submit_bio(rw, bio);
4348 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4349 struct bio *first_bio, struct btrfs_device *dev,
4350 int dev_nr, int rw, int async)
4352 struct bio_vec *bvec = first_bio->bi_io_vec;
4354 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4355 u64 physical = bbio->stripes[dev_nr].physical;
4358 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4362 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4363 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4364 bvec->bv_offset) < bvec->bv_len) {
4365 u64 len = bio->bi_size;
4367 atomic_inc(&bbio->stripes_pending);
4368 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4376 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4380 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4382 atomic_inc(&bbio->error);
4383 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4384 bio->bi_private = bbio->private;
4385 bio->bi_end_io = bbio->end_io;
4386 bio->bi_bdev = (struct block_device *)
4387 (unsigned long)bbio->mirror_num;
4388 bio->bi_sector = logical >> 9;
4390 bio_endio(bio, -EIO);
4394 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4395 int mirror_num, int async_submit)
4397 struct btrfs_mapping_tree *map_tree;
4398 struct btrfs_device *dev;
4399 struct bio *first_bio = bio;
4400 u64 logical = (u64)bio->bi_sector << 9;
4406 struct btrfs_bio *bbio = NULL;
4408 length = bio->bi_size;
4409 map_tree = &root->fs_info->mapping_tree;
4410 map_length = length;
4412 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4414 if (ret) /* -ENOMEM */
4417 total_devs = bbio->num_stripes;
4418 if (map_length < length) {
4419 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4420 "len %llu\n", (unsigned long long)logical,
4421 (unsigned long long)length,
4422 (unsigned long long)map_length);
4426 bbio->orig_bio = first_bio;
4427 bbio->private = first_bio->bi_private;
4428 bbio->end_io = first_bio->bi_end_io;
4429 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4431 while (dev_nr < total_devs) {
4432 dev = bbio->stripes[dev_nr].dev;
4433 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4434 bbio_error(bbio, first_bio, logical);
4440 * Check and see if we're ok with this bio based on its size
4441 * and offset with the given device.
4443 if (!bio_size_ok(dev->bdev, first_bio,
4444 bbio->stripes[dev_nr].physical >> 9)) {
4445 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4446 dev_nr, rw, async_submit);
4452 if (dev_nr < total_devs - 1) {
4453 bio = bio_clone(first_bio, GFP_NOFS);
4454 BUG_ON(!bio); /* -ENOMEM */
4459 submit_stripe_bio(root, bbio, bio,
4460 bbio->stripes[dev_nr].physical, dev_nr, rw,
4467 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4470 struct btrfs_device *device;
4471 struct btrfs_fs_devices *cur_devices;
4473 cur_devices = root->fs_info->fs_devices;
4474 while (cur_devices) {
4476 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4477 device = __find_device(&cur_devices->devices,
4482 cur_devices = cur_devices->seed;
4487 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4488 u64 devid, u8 *dev_uuid)
4490 struct btrfs_device *device;
4491 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4493 device = kzalloc(sizeof(*device), GFP_NOFS);
4496 list_add(&device->dev_list,
4497 &fs_devices->devices);
4498 device->dev_root = root->fs_info->dev_root;
4499 device->devid = devid;
4500 device->work.func = pending_bios_fn;
4501 device->fs_devices = fs_devices;
4502 device->missing = 1;
4503 fs_devices->num_devices++;
4504 fs_devices->missing_devices++;
4505 spin_lock_init(&device->io_lock);
4506 INIT_LIST_HEAD(&device->dev_alloc_list);
4507 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
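/*
 * add_missing_dev() builds a placeholder btrfs_device with no bdev
 * behind it, so chunk tree entries that reference an absent device
 * can still be resolved on a degraded mount.
 */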
4511 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4512 struct extent_buffer *leaf,
4513 struct btrfs_chunk *chunk)
4515 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4516 struct map_lookup *map;
4517 struct extent_map *em;
4521 u8 uuid[BTRFS_UUID_SIZE];
4526 logical = key->offset;
4527 length = btrfs_chunk_length(leaf, chunk);
4529 read_lock(&map_tree->map_tree.lock);
4530 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4531 read_unlock(&map_tree->map_tree.lock);
4533 /* already mapped? */
4534 if (em && em->start <= logical && em->start + em->len > logical) {
4535 free_extent_map(em);
4538 free_extent_map(em);
4541 em = alloc_extent_map();
4544 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4545 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4547 free_extent_map(em);
4551 em->bdev = (struct block_device *)map;
4552 em->start = logical;
4554 em->block_start = 0;
4555 em->block_len = em->len;
4557 map->num_stripes = num_stripes;
4558 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4559 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4560 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4561 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4562 map->type = btrfs_chunk_type(leaf, chunk);
4563 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4564 for (i = 0; i < num_stripes; i++) {
4565 map->stripes[i].physical =
4566 btrfs_stripe_offset_nr(leaf, chunk, i);
4567 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4568 read_extent_buffer(leaf, uuid, (unsigned long)
4569 btrfs_stripe_dev_uuid_nr(chunk, i),
4571 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4573 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4575 free_extent_map(em);
4578 if (!map->stripes[i].dev) {
4579 map->stripes[i].dev =
4580 add_missing_dev(root, devid, uuid);
4581 if (!map->stripes[i].dev) {
4583 free_extent_map(em);
4587 map->stripes[i].dev->in_fs_metadata = 1;
4590 write_lock(&map_tree->map_tree.lock);
4591 ret = add_extent_mapping(&map_tree->map_tree, em);
4592 write_unlock(&map_tree->map_tree.lock);
4593 BUG_ON(ret); /* Tree corruption */
4594 free_extent_map(em);
4599 static void fill_device_from_item(struct extent_buffer *leaf,
4600 struct btrfs_dev_item *dev_item,
4601 struct btrfs_device *device)
4605 device->devid = btrfs_device_id(leaf, dev_item);
4606 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4607 device->total_bytes = device->disk_total_bytes;
4608 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4609 device->type = btrfs_device_type(leaf, dev_item);
4610 device->io_align = btrfs_device_io_align(leaf, dev_item);
4611 device->io_width = btrfs_device_io_width(leaf, dev_item);
4612 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4614 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4615 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4618 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4620 struct btrfs_fs_devices *fs_devices;
4623 BUG_ON(!mutex_is_locked(&uuid_mutex));
4625 fs_devices = root->fs_info->fs_devices->seed;
4626 while (fs_devices) {
4627 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4631 fs_devices = fs_devices->seed;
4634 fs_devices = find_fsid(fsid);
4640 fs_devices = clone_fs_devices(fs_devices);
4641 if (IS_ERR(fs_devices)) {
4642 ret = PTR_ERR(fs_devices);
4646 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4647 root->fs_info->bdev_holder);
4649 free_fs_devices(fs_devices);
4653 if (!fs_devices->seeding) {
4654 __btrfs_close_devices(fs_devices);
4655 free_fs_devices(fs_devices);
4660 fs_devices->seed = root->fs_info->fs_devices->seed;
4661 root->fs_info->fs_devices->seed = fs_devices;
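/*
 * A sprouted filesystem keeps its seeds on the singly linked
 * fs_devices->seed chain; the freshly opened clone is spliced onto
 * the head of that chain so device lookups can fall back to it.
 */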
4666 static int read_one_dev(struct btrfs_root *root,
4667 struct extent_buffer *leaf,
4668 struct btrfs_dev_item *dev_item)
4670 struct btrfs_device *device;
4673 u8 fs_uuid[BTRFS_UUID_SIZE];
4674 u8 dev_uuid[BTRFS_UUID_SIZE];
4676 devid = btrfs_device_id(leaf, dev_item);
4677 read_extent_buffer(leaf, dev_uuid,
4678 (unsigned long)btrfs_device_uuid(dev_item),
4680 read_extent_buffer(leaf, fs_uuid,
4681 (unsigned long)btrfs_device_fsid(dev_item),
4684 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4685 ret = open_seed_devices(root, fs_uuid);
4686 if (ret && !btrfs_test_opt(root, DEGRADED))
4690 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4691 if (!device || !device->bdev) {
4692 if (!btrfs_test_opt(root, DEGRADED))
4696 printk(KERN_WARNING "warning devid %llu missing\n",
4697 (unsigned long long)devid);
4698 device = add_missing_dev(root, devid, dev_uuid);
4701 } else if (!device->missing) {
4703 * this happens when a device that was properly set up
4704 * in the device info lists suddenly goes bad.
4705 * device->bdev is NULL, and so we have to set
4706 * device->missing to one here
4708 root->fs_info->fs_devices->missing_devices++;
4709 device->missing = 1;
4713 if (device->fs_devices != root->fs_info->fs_devices) {
4714 BUG_ON(device->writeable);
4715 if (device->generation !=
4716 btrfs_device_generation(leaf, dev_item))
4720 fill_device_from_item(leaf, dev_item, device);
4721 device->dev_root = root->fs_info->dev_root;
4722 device->in_fs_metadata = 1;
4723 if (device->writeable) {
4724 device->fs_devices->total_rw_bytes += device->total_bytes;
4725 spin_lock(&root->fs_info->free_chunk_lock);
4726 root->fs_info->free_chunk_space += device->total_bytes -
4728 spin_unlock(&root->fs_info->free_chunk_lock);
4734 int btrfs_read_sys_array(struct btrfs_root *root)
4736 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4737 struct extent_buffer *sb;
4738 struct btrfs_disk_key *disk_key;
4739 struct btrfs_chunk *chunk;
4741 unsigned long sb_ptr;
4747 struct btrfs_key key;
4749 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4750 BTRFS_SUPER_INFO_SIZE);
4753 btrfs_set_buffer_uptodate(sb);
4754 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4756 * The sb extent buffer is artificial and just used to read the system array.
4757 * btrfs_set_buffer_uptodate() call does not properly mark all its
4758 * pages up-to-date when the page is larger: extent does not cover the
4759 * whole page and consequently check_page_uptodate does not find all
4760 * the page's extents up-to-date (the hole beyond sb),
4761 * write_extent_buffer then triggers a WARN_ON.
4763 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4764 * but sb spans only this function. Add an explicit SetPageUptodate call
4765 * to silence the warning e.g. on PowerPC 64.
4767 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4768 SetPageUptodate(sb->pages[0]);
4770 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4771 array_size = btrfs_super_sys_array_size(super_copy);
4773 ptr = super_copy->sys_chunk_array;
4774 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4777 while (cur < array_size) {
4778 disk_key = (struct btrfs_disk_key *)ptr;
4779 btrfs_disk_key_to_cpu(&key, disk_key);
4781 len = sizeof(*disk_key); ptr += len;
4785 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4786 chunk = (struct btrfs_chunk *)sb_ptr;
4787 ret = read_one_chunk(root, &key, sb, chunk);
4790 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4791 len = btrfs_chunk_item_size(num_stripes);
4800 free_extent_buffer(sb);
4804 int btrfs_read_chunk_tree(struct btrfs_root *root)
4806 struct btrfs_path *path;
4807 struct extent_buffer *leaf;
4808 struct btrfs_key key;
4809 struct btrfs_key found_key;
4813 root = root->fs_info->chunk_root;
4815 path = btrfs_alloc_path();
4819 mutex_lock(&uuid_mutex);
4822 /* first we search for all of the device items, and then we
4823 * read in all of the chunk items. This way we can create chunk
4824 * mappings that reference all of the devices that are found
4826 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4830 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4834 leaf = path->nodes[0];
4835 slot = path->slots[0];
4836 if (slot >= btrfs_header_nritems(leaf)) {
4837 ret = btrfs_next_leaf(root, path);
4844 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4845 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4846 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4848 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4849 struct btrfs_dev_item *dev_item;
4850 dev_item = btrfs_item_ptr(leaf, slot,
4851 struct btrfs_dev_item);
4852 ret = read_one_dev(root, leaf, dev_item);
4856 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4857 struct btrfs_chunk *chunk;
4858 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4859 ret = read_one_chunk(root, &found_key, leaf, chunk);
4865 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4867 btrfs_release_path(path);
4872 unlock_chunks(root);
4873 mutex_unlock(&uuid_mutex);
4875 btrfs_free_path(path);
4879 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4883 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4884 btrfs_dev_stat_reset(dev, i);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			/* no entry on disk yet: all counters start at zero */
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		/* items written by older kernels may carry fewer counters;
		 * see the illustration after this function */
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
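/*
 * Illustration only (never compiled): the item_size guard above makes
 * the on-disk dev_stats item forward and backward compatible.  Say an
 * item was written by a kernel that knew only three counters:
 */
#if 0
	item_size = 3 * sizeof(__le64);		/* 24 bytes on disk */
	/* i = 0, 1, 2: item_size >= (1 + i) * 8 -> value read from disk */
	/* i = 3, 4:    item_size <  (1 + i) * 8 -> counter reset to 0   */
#endif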
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* the btrfs_search_slot() return convention used below is sketched
	 * after this function */
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete the old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
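/*
 * Illustration only (never compiled): the return convention of
 * btrfs_search_slot() that update_dev_stat_item() relies on when called
 * with ins_len == -1 (prepare for deletion) and cow == 1.
 */
#if 0
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	/* ret <  0: error                                          */
	/* ret == 0: item found; rewrite it, or delete and reinsert */
	/*           it when the existing item is too small         */
	/* ret == 1: item not found; insert a fresh full-sized one  */
#endif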
/*
 * Called from commit_transaction().  Writes all changed device stats to
 * disk; a sketch of the call site follows this function.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
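/*
 * Illustration only (never compiled): per the comment above, the commit
 * path calls this once per transaction, roughly as follows.
 */
#if 0
	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
#endif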
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
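/*
 * Illustration only (never compiled): a typical caller in an I/O error
 * path; the index selects which of the five counters to bump.
 */
#if 0
	if (err == -EIO)
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_READ_ERRS);
#endif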
void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR
		"btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, device not found\n");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, not yet valid\n");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
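/*
 * Illustration only (userspace, never compiled here): fetching the
 * counters via the BTRFS_IOC_GET_DEV_STATS ioctl that lands in
 * btrfs_get_dev_stats() above.  Assumes the btrfs ioctl header that
 * defines the struct and the ioctl number; error handling trimmed.
 * Setting stats.flags = BTRFS_DEV_STATS_RESET would also clear the
 * counters after they are read.
 */
#if 0
	struct btrfs_ioctl_get_dev_stats stats;
	int fd = open("/mnt", O_RDONLY);

	memset(&stats, 0, sizeof(stats));
	stats.devid = 1;	/* devid of the device to query */
	stats.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &stats) == 0)
		printf("write errs: %llu\n", (unsigned long long)
		       stats.values[BTRFS_DEV_STAT_WRITE_ERRS]);
#endif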