/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
                                 enum kobject_action action)
{
        int ret;

        ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
        if (ret)
                pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
                        action,
                        kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
                        &disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

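/*
 * Open the block device at @device_path and read its btrfs super block.
 * @flush asks for a filemap flush before the super block is (re)read.
 * On success the caller owns the references returned in *bdev and *bh;
 * on failure both are NULLed out and the bdev has already been released,
 * so callers only need to check the return value.
 */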
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct buffer_head **bh)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                printk(KERN_INFO "btrfs: open %s failed\n", device_path);
                goto error;
        }

        if (flush)
                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
        ret = set_blocksize(*bdev, 4096);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *bh = btrfs_read_dev_super(*bdev);
        if (!*bh) {
                ret = -EINVAL;
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        *bh = NULL;
        return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
                        struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, set up a plug here and finish it off before we return
         */
        blk_start_plug(&plug);

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {

                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;

                if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
                        cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched())
                                        cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
                /* unplug every 64 requests just for good measure */
                if (batch_run % 64 == 0) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

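/*
 * Register a scanned device.  In this file this is only reached via
 * btrfs_scan_one_device() below, which holds the uuid_mutex.  A new
 * fs_devices is created for an unknown fsid, a new btrfs_device for an
 * unknown devid, and an already known device just gets its cached path
 * and the latest_devid/latest_trans bookkeeping refreshed.
 */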
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->dev_stats_valid = 0;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        return -ENOMEM;
                }
                rcu_assign_pointer(device->name, name);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                /* init readahead state */
                spin_lock_init(&device->reada_lock);
                device->reada_curr_zone = NULL;
                atomic_set(&device->reada_in_flight, 0);
                device->reada_next = 0;
                INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
                INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

                mutex_lock(&fs_devices->device_list_mutex);
                list_add_rcu(&device->dev_list, &fs_devices->devices);
                mutex_unlock(&fs_devices->device_list_mutex);

                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        } else if (!device->name || strcmp(device->name->str, path)) {
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}

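/*
 * Make an in-memory copy of @orig and its device list.  Only the
 * identifying fields (devid, uuid, name) are duplicated; the clone is
 * not opened and takes no bdev references, so the caller must open it
 * separately if needed.
 */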
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        fs_devices->total_devices = orig->total_devices;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        /* We have held the volume lock, it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                /*
                 * This is ok to do without the rcu read lock held because
                 * we hold the uuid mutex, so nothing we touch in here is
                 * going to disappear.
                 */
                name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        goto error;
                }
                rcu_assign_pointer(device->name, name);

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}

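/*
 * Walk the device list (and any seed fs_devices chained behind it) and
 * throw away every device that was scanned but is not referenced by the
 * FS metadata, closing its bdev if it was opened.  @step only matters
 * for the dev-replace target device: in step 0 it is always kept, and
 * once the dev_replace state has been read from disk it is kept only if
 * a replace operation is actually running.
 */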
void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
                               struct btrfs_fs_devices *fs_devices, int step)
{
        struct btrfs_device *device, *next;
        struct block_device *latest_bdev = NULL;
        u64 latest_devid = 0;
        u64 latest_transid = 0;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path; it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata) {
                        if (!device->is_tgtdev_for_dev_replace &&
                            (!latest_transid ||
                             device->generation > latest_transid)) {
                                latest_devid = device->devid;
                                latest_transid = device->generation;
                                latest_bdev = device->bdev;
                        }
                        continue;
                }

                if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
                        /*
                         * In the first step, keep the device which has
                         * the correct fsid and the devid that is used
                         * for the dev_replace procedure.
                         * In the second step, the dev_replace state is
                         * read from the device tree and it is known
                         * whether the procedure is really active or
                         * not, which means whether this device is
                         * used or whether it should be removed.
                         */
                        if (step == 0 || device->is_tgtdev_for_dev_replace) {
                                continue;
                        }
                }
                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        if (!device->is_tgtdev_for_dev_replace)
                                fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;

        mutex_unlock(&uuid_mutex);
}

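/*
 * Freeing a device happens in two deferred stages: free_device() below
 * runs as the RCU callback, but because blkdev_put() may sleep (which
 * is not allowed in RCU callback context) it only queues __free_device()
 * on the system workqueue, which finally drops the bdev and frees the
 * struct.
 */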
static void __free_device(struct work_struct *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, rcu_work);

        if (device->bdev)
                blkdev_put(device->bdev, device->mode);

        rcu_string_free(device->name);
        kfree(device);
}

static void free_device(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);

        INIT_WORK(&device->rcu_work, __free_device);
        schedule_work(&device->rcu_work);
}

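/*
 * Drop one open count on @fs_devices.  When the last opener goes away,
 * every device is replaced under the device_list_mutex by a fresh
 * "closed" copy (no bdev, not writeable) and the old struct is freed
 * via call_rcu(), so lockless readers of the list never see a stale
 * bdev pointer.
 */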
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;
                struct rcu_string *name;

                if (device->bdev)
                        fs_devices->open_devices--;

                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                if (device->can_discard)
                        fs_devices->num_can_discard--;

                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device); /* -ENOMEM */
                memcpy(new_device, device, sizeof(*new_device));

                /* Safe because we are under uuid_mutex */
                if (device->name) {
                        name = rcu_string_strdup(device->name->str, GFP_NOFS);
                        BUG_ON(device->name && !name); /* -ENOMEM */
                        rcu_assign_pointer(new_device->name, name);
                }
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
                new_device->can_discard = 0;
                spin_lock_init(&new_device->io_lock);
                list_replace_rcu(&device->dev_list, &new_device->dev_list);

                call_rcu(&device->rcu, free_device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        /*
         * Wait for the rcu workers queued by __btrfs_close_devices
         * to finish all blkdev_puts, so the devices are really free
         * when umount returns.
         */
        rcu_barrier();
        return ret;
}

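/*
 * Open every listed device that has a name but no open bdev yet,
 * checking the devid and uuid from the on-disk super block against the
 * in-memory device.  The device with the highest generation becomes
 * latest_bdev, and the seeding flag is cleared as soon as one non-seed
 * super block is seen.  This only fails (-EINVAL) if not a single
 * device could be opened.
 */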
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                /* Just open everything we can; ignore failures here */
                if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                            &bdev, &bh))
                        continue;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                q = bdev_get_queue(bdev);
                if (blk_queue_discard(q)) {
                        device->can_discard = 1;
                        fs_devices->num_can_discard++;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                        fs_devices->rotating = 1;

                fs_devices->open_devices++;
                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                brelse(bh);
                continue;

error_brelse:
                brelse(bh);
                blkdev_put(bdev, flags);
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * Look for a btrfs signature on a device.  This may be called out of the
 * mount path, and we are not allowed to call set_blocksize during the
 * scan.  The superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct page *page;
        void *p;
        int ret = -EINVAL;
        u64 devid;
        u64 transid;
        u64 total_devices;
        u64 bytenr;
        pgoff_t index;

        /*
         * we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        bytenr = btrfs_sb_offset(0);
        flags |= FMODE_EXCL;
        mutex_lock(&uuid_mutex);

        bdev = blkdev_get_by_path(path, flags, holder);

        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        /* make sure our super fits in the device */
        if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
                goto error_bdev_put;

        /* make sure our super fits in the page */
        if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
                goto error_bdev_put;

        /* make sure our super doesn't straddle pages on disk */
        index = bytenr >> PAGE_CACHE_SHIFT;
        if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
                goto error_bdev_put;

        /* pull in the page with our super */
        page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
                                   index, GFP_NOFS);

        if (IS_ERR_OR_NULL(page))
                goto error_bdev_put;

        p = kmap(page);

        /* align our pointer to the offset of the super block */
        disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

        if (btrfs_super_bytenr(disk_super) != bytenr ||
            disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
                goto error_unmap;

        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        total_devices = btrfs_super_num_devices(disk_super);

        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
                printk(KERN_INFO "device label %s ", disk_super->label);
        } else {
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        }

        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
        if (!ret && fs_devices_ret)
                (*fs_devices_ret)->total_devices = total_devices;

error_unmap:
        kunmap(page);
        page_cache_release(page);

error_bdev_put:
        blkdev_put(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 extent_end;
        int ret;
        int slot;
        struct extent_buffer *l;

        *length = 0;

        if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (key.offset <= start && extent_end > end) {
                        *length = end - start + 1;
                        break;
                } else if (key.offset <= start && extent_end > start)
                        *length += extent_end - start;
                else if (key.offset > start && extent_end <= end)
                        *length += extent_end - key.offset;
                else if (key.offset > start && key.offset <= end) {
                        *length += end - key.offset + 1;
                        break;
                } else if (key.offset > end)
                        break;

next:
                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

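/*
 * Check whether [*start, *start + len) on @device collides with a
 * stripe of a chunk that this transaction has allocated but not yet
 * committed (such chunks have no dev extent items yet).  On a hit,
 * *start is pushed past the conflicting stripe and 1 is returned so
 * the caller can retry from the new offset.
 */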
static int contains_pending_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_device *device,
                                   u64 *start, u64 len)
{
        struct extent_map *em;
        int ret = 0;

        list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
                struct map_lookup *map;
                int i;

                map = (struct map_lookup *)em->bdev;
                for (i = 0; i < map->num_stripes; i++) {
                        if (map->stripes[i].dev != device)
                                continue;
                        if (map->stripes[i].physical >= *start + len ||
                            map->stripes[i].physical + em->orig_block_len <=
                            *start)
                                continue;
                        *start = map->stripes[i].physical +
                                em->orig_block_len;
                        ret = 1;
                }
        }

        return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:     the device which we search the free space in
 * @num_bytes:  the size of the free space that we need
 * @start:      store the start of the free space.
 * @len:        the size of the free space that we find, or the size of the
 *              max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_start;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        max_hole_start = search_start;
        max_hole_size = 0;
        hole_size = 0;

        if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
                ret = -ENOSPC;
                goto out;
        }

        path->reada = 2;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        /*
                         * Have to check before we set max_hole_start, otherwise
                         * we could end up sending back this offset anyway.
                         */
                        if (contains_pending_extent(trans, device,
                                                    &search_start,
                                                    hole_size))
                                hole_size = 0;

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start)
                hole_size = search_end - search_start;

        if (hole_size > max_hole_size) {
                max_hole_start = search_start;
                max_hole_size = hole_size;
        }

        if (contains_pending_extent(trans, device, &search_start, hole_size)) {
                btrfs_release_path(path);
                goto again;
        }

        /* See above. */
        if (hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}

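/*
 * Delete the dev extent item for @device covering @start and give the
 * bytes back to free_chunk_space.  If the search key misses, the
 * previous item must be a dev extent covering @start (enforced by the
 * BUG_ON below) and the search is restarted at that item's key.
 */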
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                key = found_key;
                btrfs_release_path(path);
                goto again;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        } else {
                btrfs_error(root->fs_info, ret, "Slot search failed");
                goto out;
        }

        if (device->bytes_used > 0) {
                u64 len = btrfs_dev_extent_length(leaf, extent);
                device->bytes_used -= len;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += len;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        ret = btrfs_del_item(trans, root, path);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to remove dev extent item");
        }
out:
        btrfs_free_path(path);
        return ret;
}

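/*
 * Insert a dev extent item recording that [start, start + num_bytes)
 * on @device backs the chunk at (@chunk_tree, @chunk_objectid,
 * @chunk_offset), stamped with the chunk tree uuid.
 */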
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_device *device,
                                  u64 chunk_tree, u64 chunk_objectid,
                                  u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        WARN_ON(device->is_tgtdev_for_dev_replace);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}

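/*
 * The next chunk offset is simply the end of the last extent_map in the
 * chunk mapping tree (or 0 if the tree is empty); chunk logical
 * addresses are handed out in increasing order.
 */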
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct rb_node *n;
        u64 ret = 0;

        em_tree = &fs_info->mapping_tree.map_tree;
        read_lock(&em_tree->lock);
        n = rb_last(&em_tree->map);
        if (n) {
                em = rb_entry(n, struct extent_map, rb_node);
                ret = em->start + em->len;
        }
        read_unlock(&em_tree->lock);

        return ret;
}

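/*
 * Pick the next unused devid.  Searching for offset (u64)-1 always
 * lands past the last DEV_ITEM, so btrfs_previous_item() finds the
 * highest existing devid; we return that plus one, or 1 for the first
 * device.
 */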
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0); /* Corruption */

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

/*
 * the device information is stored in the chunk root;
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

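/*
 * Remove the DEV_ITEM for @device from the chunk tree, in its own
 * transaction and with the chunk mutex held across the search and
 * deletion.
 */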
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        struct btrfs_fs_devices *cur_devices;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        unsigned seq;
        int ret = 0;
        bool clear_super = false;

        mutex_lock(&uuid_mutex);

        do {
                seq = read_seqbegin(&root->fs_info->profiles_lock);

                all_avail = root->fs_info->avail_data_alloc_bits |
                            root->fs_info->avail_system_alloc_bits |
                            root->fs_info->avail_metadata_alloc_bits;
        } while (read_seqretry(&root->fs_info->profiles_lock, seq));

        num_devices = root->fs_info->fs_devices->num_devices;
        btrfs_dev_replace_lock(&root->fs_info->dev_replace);
        if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
                WARN_ON(num_devices < 1);
                num_devices--;
        }
        btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
                ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
                ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
            root->fs_info->fs_devices->rw_devices <= 2) {
                ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
                goto out;
        }
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
            root->fs_info->fs_devices->rw_devices <= 3) {
                ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                /*
                 * It is safe to read the devices since the volume_mutex
                 * is held.
                 */
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata &&
                            !tmp->is_tgtdev_for_dev_replace &&
                            !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
                        goto out;
                }
        } else {
                ret = btrfs_get_bdev_and_sb(device_path,
                                            FMODE_WRITE | FMODE_EXCL,
                                            root->fs_info->bdev_holder, 0,
                                            &bdev, &bh);
                if (ret)
                        goto out;
                disk_super = (struct btrfs_super_block *)bh->b_data;
1537                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1538                 dev_uuid = disk_super->dev_item.uuid;
1539                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1540                                            disk_super->fsid);
1541                 if (!device) {
1542                         ret = -ENOENT;
1543                         goto error_brelse;
1544                 }
1545         }
1546
1547         if (device->is_tgtdev_for_dev_replace) {
1548                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1549                 goto error_brelse;
1550         }
1551
1552         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1553                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1554                 goto error_brelse;
1555         }
1556
1557         if (device->writeable) {
1558                 lock_chunks(root);
1559                 list_del_init(&device->dev_alloc_list);
1560                 unlock_chunks(root);
1561                 root->fs_info->fs_devices->rw_devices--;
1562                 clear_super = true;
1563         }
1564
1565         ret = btrfs_shrink_device(device, 0);
1566         if (ret)
1567                 goto error_undo;
1568
1569         /*
1570          * TODO: the superblock still includes this device in its num_devices
1571          * counter although write_all_supers() is not locked out. This
1572          * could give a filesystem state which requires a degraded mount.
1573          */
1574         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1575         if (ret)
1576                 goto error_undo;
1577
1578         spin_lock(&root->fs_info->free_chunk_lock);
1579         root->fs_info->free_chunk_space -= device->total_bytes -
1580                 device->bytes_used;
1581         spin_unlock(&root->fs_info->free_chunk_lock);
1582
1583         device->in_fs_metadata = 0;
1584         btrfs_scrub_cancel_dev(root->fs_info, device);
1585
1586         /*
1587          * the device list mutex makes sure that we don't change
1588          * the device list while someone else is writing out all
1589          * the device supers.
1590          */
1591
1592         cur_devices = device->fs_devices;
1593         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1594         list_del_rcu(&device->dev_list);
1595
1596         device->fs_devices->num_devices--;
1597         device->fs_devices->total_devices--;
1598
1599         if (device->missing)
1600                 root->fs_info->fs_devices->missing_devices--;
1601
1602         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1603                                  struct btrfs_device, dev_list);
1604         if (device->bdev == root->fs_info->sb->s_bdev)
1605                 root->fs_info->sb->s_bdev = next_device->bdev;
1606         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1607                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1608
1609         if (device->bdev)
1610                 device->fs_devices->open_devices--;
1611
1612         call_rcu(&device->rcu, free_device);
1613         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1614
1615         num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1616         btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1617
1618         if (cur_devices->open_devices == 0) {
1619                 struct btrfs_fs_devices *fs_devices;
1620                 fs_devices = root->fs_info->fs_devices;
1621                 while (fs_devices) {
1622                         if (fs_devices->seed == cur_devices)
1623                                 break;
1624                         fs_devices = fs_devices->seed;
1625                 }
1626                 fs_devices->seed = cur_devices->seed;
1627                 cur_devices->seed = NULL;
1628                 lock_chunks(root);
1629                 __btrfs_close_devices(cur_devices);
1630                 unlock_chunks(root);
1631                 free_fs_devices(cur_devices);
1632         }
1633
1634         root->fs_info->num_tolerated_disk_barrier_failures =
1635                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1636
1637         /*
1638          * at this point, the device is zero sized.  We want to
1639          * remove it from the devices list and zero out the old super
1640          */
1641         if (clear_super && disk_super) {
1642                 /* make sure this device isn't detected as part of
1643                  * the FS anymore
1644                  */
1645                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1646                 set_buffer_dirty(bh);
1647                 sync_dirty_buffer(bh);
1648         }
1649
1650         ret = 0;
1651
1652         /* Notify udev that device has changed */
1653         if (bdev)
1654                 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1655
1656 error_brelse:
1657         brelse(bh);
1658         if (bdev)
1659                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1660 out:
1661         mutex_unlock(&uuid_mutex);
1662         return ret;
1663 error_undo:
1664         if (device->writeable) {
1665                 lock_chunks(root);
1666                 list_add(&device->dev_alloc_list,
1667                          &root->fs_info->fs_devices->alloc_list);
1668                 unlock_chunks(root);
1669                 root->fs_info->fs_devices->rw_devices++;
1670         }
1671         goto error_brelse;
1672 }
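/*
 * Editor's sketch (userspace, not part of this file): btrfs_rm_device() is
 * reached through the BTRFS_IOC_RM_DEV ioctl.  A minimal caller, assuming
 * the uapi definitions in <linux/btrfs.h>, would look roughly like this;
 * passing the literal string "missing" selects the first absent device,
 * as handled above.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>	/* struct btrfs_ioctl_vol_args, BTRFS_IOC_RM_DEV */

int main(int argc, char **argv)
{
	struct btrfs_ioctl_vol_args args;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <device|missing>\n",
			argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);	/* any fd on the mounted fs */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	strncpy(args.name, argv[2], BTRFS_PATH_NAME_MAX);
	if (ioctl(fd, BTRFS_IOC_RM_DEV, &args) < 0) {
		perror("BTRFS_IOC_RM_DEV");
		return 1;
	}
	return 0;
}
#endif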
1673
1674 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1675                                  struct btrfs_device *srcdev)
1676 {
1677         WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1678         list_del_rcu(&srcdev->dev_list);
1679         list_del_rcu(&srcdev->dev_alloc_list);
1680         fs_info->fs_devices->num_devices--;
1681         if (srcdev->missing) {
1682                 fs_info->fs_devices->missing_devices--;
1683                 fs_info->fs_devices->rw_devices++;
1684         }
1685         if (srcdev->can_discard)
1686                 fs_info->fs_devices->num_can_discard--;
1687         if (srcdev->bdev)
1688                 fs_info->fs_devices->open_devices--;
1689
1690         call_rcu(&srcdev->rcu, free_device);
1691 }
1692
1693 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1694                                       struct btrfs_device *tgtdev)
1695 {
1696         struct btrfs_device *next_device;
1697
1698         WARN_ON(!tgtdev);
1699         mutex_lock(&fs_info->fs_devices->device_list_mutex);
1700         if (tgtdev->bdev) {
1701                 btrfs_scratch_superblock(tgtdev);
1702                 fs_info->fs_devices->open_devices--;
1703         }
1704         fs_info->fs_devices->num_devices--;
1705         if (tgtdev->can_discard)
1706                 fs_info->fs_devices->num_can_discard--;
1707
1708         next_device = list_entry(fs_info->fs_devices->devices.next,
1709                                  struct btrfs_device, dev_list);
1710         if (tgtdev->bdev == fs_info->sb->s_bdev)
1711                 fs_info->sb->s_bdev = next_device->bdev;
1712         if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1713                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1714         list_del_rcu(&tgtdev->dev_list);
1715
1716         call_rcu(&tgtdev->rcu, free_device);
1717
1718         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1719 }
1720
1721 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1722                                      struct btrfs_device **device)
1723 {
1724         int ret = 0;
1725         struct btrfs_super_block *disk_super;
1726         u64 devid;
1727         u8 *dev_uuid;
1728         struct block_device *bdev;
1729         struct buffer_head *bh;
1730
1731         *device = NULL;
1732         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1733                                     root->fs_info->bdev_holder, 0, &bdev, &bh);
1734         if (ret)
1735                 return ret;
1736         disk_super = (struct btrfs_super_block *)bh->b_data;
1737         devid = btrfs_stack_device_id(&disk_super->dev_item);
1738         dev_uuid = disk_super->dev_item.uuid;
1739         *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1740                                     disk_super->fsid);
1741         brelse(bh);
1742         if (!*device)
1743                 ret = -ENOENT;
1744         blkdev_put(bdev, FMODE_READ);
1745         return ret;
1746 }
1747
1748 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1749                                          char *device_path,
1750                                          struct btrfs_device **device)
1751 {
1752         *device = NULL;
1753         if (strcmp(device_path, "missing") == 0) {
1754                 struct list_head *devices;
1755                 struct btrfs_device *tmp;
1756
1757                 devices = &root->fs_info->fs_devices->devices;
1758                 /*
1759                  * It is safe to read the devices since the volume_mutex
1760                  * is held by the caller.
1761                  */
1762                 list_for_each_entry(tmp, devices, dev_list) {
1763                         if (tmp->in_fs_metadata && !tmp->bdev) {
1764                                 *device = tmp;
1765                                 break;
1766                         }
1767                 }
1768
1769                 if (!*device) {
1770                         pr_err("btrfs: no missing device found\n");
1771                         return -ENOENT;
1772                 }
1773
1774                 return 0;
1775         } else {
1776                 return btrfs_find_device_by_path(root, device_path, device);
1777         }
1778 }
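/*
 * Editor's note: the literal string "missing" checked above is the
 * user-visible sentinel that btrfs-progs passes for commands such as
 * "btrfs device delete missing <mnt>"; it selects the first device that
 * is recorded in the metadata but currently has no block device attached.
 */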
1779
1780 /*
1781  * Does all the dirty work required for changing the file system's UUID.
1782  */
1783 static int btrfs_prepare_sprout(struct btrfs_root *root)
1784 {
1785         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1786         struct btrfs_fs_devices *old_devices;
1787         struct btrfs_fs_devices *seed_devices;
1788         struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1789         struct btrfs_device *device;
1790         u64 super_flags;
1791
1792         BUG_ON(!mutex_is_locked(&uuid_mutex));
1793         if (!fs_devices->seeding)
1794                 return -EINVAL;
1795
1796         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1797         if (!seed_devices)
1798                 return -ENOMEM;
1799
1800         old_devices = clone_fs_devices(fs_devices);
1801         if (IS_ERR(old_devices)) {
1802                 kfree(seed_devices);
1803                 return PTR_ERR(old_devices);
1804         }
1805
1806         list_add(&old_devices->list, &fs_uuids);
1807
1808         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1809         seed_devices->opened = 1;
1810         INIT_LIST_HEAD(&seed_devices->devices);
1811         INIT_LIST_HEAD(&seed_devices->alloc_list);
1812         mutex_init(&seed_devices->device_list_mutex);
1813
1814         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1815         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1816                               synchronize_rcu);
1817         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1818
1819         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1820         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1821                 device->fs_devices = seed_devices;
1822         }
1823
1824         fs_devices->seeding = 0;
1825         fs_devices->num_devices = 0;
1826         fs_devices->open_devices = 0;
1827         fs_devices->total_devices = 0;
1828         fs_devices->seed = seed_devices;
1829
1830         generate_random_uuid(fs_devices->fsid);
1831         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1832         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1833         super_flags = btrfs_super_flags(disk_super) &
1834                       ~BTRFS_SUPER_FLAG_SEEDING;
1835         btrfs_set_super_flags(disk_super, super_flags);
1836
1837         return 0;
1838 }
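/*
 * Editor's note: the usual way to reach btrfs_prepare_sprout() from
 * userspace is roughly the following sequence (command names from
 * btrfs-progs, shown only as an illustration):
 *
 *   btrfstune -S 1 /dev/sdX         # mark the filesystem as a seed
 *   mount /dev/sdX /mnt             # mounts read-only (seeding)
 *   btrfs device add /dev/sdY /mnt  # sprouts a writable fs onto sdY
 *   mount -o remount,rw /mnt
 *
 * The sprout receives a fresh fsid here, while the original devices move
 * to the read-only seed device list.
 */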
1839
1840 /*
1841  * Store the expected generation for seed devices in device items.
1842  */
1843 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1844                                struct btrfs_root *root)
1845 {
1846         struct btrfs_path *path;
1847         struct extent_buffer *leaf;
1848         struct btrfs_dev_item *dev_item;
1849         struct btrfs_device *device;
1850         struct btrfs_key key;
1851         u8 fs_uuid[BTRFS_UUID_SIZE];
1852         u8 dev_uuid[BTRFS_UUID_SIZE];
1853         u64 devid;
1854         int ret;
1855
1856         path = btrfs_alloc_path();
1857         if (!path)
1858                 return -ENOMEM;
1859
1860         root = root->fs_info->chunk_root;
1861         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1862         key.offset = 0;
1863         key.type = BTRFS_DEV_ITEM_KEY;
1864
1865         while (1) {
1866                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1867                 if (ret < 0)
1868                         goto error;
1869
1870                 leaf = path->nodes[0];
1871 next_slot:
1872                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1873                         ret = btrfs_next_leaf(root, path);
1874                         if (ret > 0)
1875                                 break;
1876                         if (ret < 0)
1877                                 goto error;
1878                         leaf = path->nodes[0];
1879                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1880                         btrfs_release_path(path);
1881                         continue;
1882                 }
1883
1884                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1885                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1886                     key.type != BTRFS_DEV_ITEM_KEY)
1887                         break;
1888
1889                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1890                                           struct btrfs_dev_item);
1891                 devid = btrfs_device_id(leaf, dev_item);
1892                 read_extent_buffer(leaf, dev_uuid,
1893                                    (unsigned long)btrfs_device_uuid(dev_item),
1894                                    BTRFS_UUID_SIZE);
1895                 read_extent_buffer(leaf, fs_uuid,
1896                                    (unsigned long)btrfs_device_fsid(dev_item),
1897                                    BTRFS_UUID_SIZE);
1898                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1899                                            fs_uuid);
1900                 BUG_ON(!device); /* Logic error */
1901
1902                 if (device->fs_devices->seeding) {
1903                         btrfs_set_device_generation(leaf, dev_item,
1904                                                     device->generation);
1905                         btrfs_mark_buffer_dirty(leaf);
1906                 }
1907
1908                 path->slots[0]++;
1909                 goto next_slot;
1910         }
1911         ret = 0;
1912 error:
1913         btrfs_free_path(path);
1914         return ret;
1915 }
1916
1917 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1918 {
1919         struct request_queue *q;
1920         struct btrfs_trans_handle *trans;
1921         struct btrfs_device *device;
1922         struct block_device *bdev;
1923         struct list_head *devices;
1924         struct super_block *sb = root->fs_info->sb;
1925         struct rcu_string *name;
1926         u64 total_bytes;
1927         int seeding_dev = 0;
1928         int ret = 0;
1929
1930         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1931                 return -EROFS;
1932
1933         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1934                                   root->fs_info->bdev_holder);
1935         if (IS_ERR(bdev))
1936                 return PTR_ERR(bdev);
1937
1938         if (root->fs_info->fs_devices->seeding) {
1939                 seeding_dev = 1;
1940                 down_write(&sb->s_umount);
1941                 mutex_lock(&uuid_mutex);
1942         }
1943
1944         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1945
1946         devices = &root->fs_info->fs_devices->devices;
1947
1948         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1949         list_for_each_entry(device, devices, dev_list) {
1950                 if (device->bdev == bdev) {
1951                         ret = -EEXIST;
1952                         mutex_unlock(
1953                                 &root->fs_info->fs_devices->device_list_mutex);
1954                         goto error;
1955                 }
1956         }
1957         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1958
1959         device = kzalloc(sizeof(*device), GFP_NOFS);
1960         if (!device) {
1961                 /* we can safely leave the fs_devices entry around */
1962                 ret = -ENOMEM;
1963                 goto error;
1964         }
1965
1966         name = rcu_string_strdup(device_path, GFP_NOFS);
1967         if (!name) {
1968                 kfree(device);
1969                 ret = -ENOMEM;
1970                 goto error;
1971         }
1972         rcu_assign_pointer(device->name, name);
1973
1974         ret = find_next_devid(root, &device->devid);
1975         if (ret) {
1976                 rcu_string_free(device->name);
1977                 kfree(device);
1978                 goto error;
1979         }
1980
1981         trans = btrfs_start_transaction(root, 0);
1982         if (IS_ERR(trans)) {
1983                 rcu_string_free(device->name);
1984                 kfree(device);
1985                 ret = PTR_ERR(trans);
1986                 goto error;
1987         }
1988
1989         lock_chunks(root);
1990
1991         q = bdev_get_queue(bdev);
1992         if (blk_queue_discard(q))
1993                 device->can_discard = 1;
1994         device->writeable = 1;
1995         device->work.func = pending_bios_fn;
1996         generate_random_uuid(device->uuid);
1997         spin_lock_init(&device->io_lock);
1998         device->generation = trans->transid;
1999         device->io_width = root->sectorsize;
2000         device->io_align = root->sectorsize;
2001         device->sector_size = root->sectorsize;
2002         device->total_bytes = i_size_read(bdev->bd_inode);
2003         device->disk_total_bytes = device->total_bytes;
2004         device->dev_root = root->fs_info->dev_root;
2005         device->bdev = bdev;
2006         device->in_fs_metadata = 1;
2007         device->is_tgtdev_for_dev_replace = 0;
2008         device->mode = FMODE_EXCL;
2009         set_blocksize(device->bdev, 4096);
2010
2011         if (seeding_dev) {
2012                 sb->s_flags &= ~MS_RDONLY;
2013                 ret = btrfs_prepare_sprout(root);
2014                 BUG_ON(ret); /* -ENOMEM */
2015         }
2016
2017         device->fs_devices = root->fs_info->fs_devices;
2018
2019         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2020         list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2021         list_add(&device->dev_alloc_list,
2022                  &root->fs_info->fs_devices->alloc_list);
2023         root->fs_info->fs_devices->num_devices++;
2024         root->fs_info->fs_devices->open_devices++;
2025         root->fs_info->fs_devices->rw_devices++;
2026         root->fs_info->fs_devices->total_devices++;
2027         if (device->can_discard)
2028                 root->fs_info->fs_devices->num_can_discard++;
2029         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2030
2031         spin_lock(&root->fs_info->free_chunk_lock);
2032         root->fs_info->free_chunk_space += device->total_bytes;
2033         spin_unlock(&root->fs_info->free_chunk_lock);
2034
2035         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2036                 root->fs_info->fs_devices->rotating = 1;
2037
2038         total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2039         btrfs_set_super_total_bytes(root->fs_info->super_copy,
2040                                     total_bytes + device->total_bytes);
2041
2042         total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2043         btrfs_set_super_num_devices(root->fs_info->super_copy,
2044                                     total_bytes + 1);
2045         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2046
2047         if (seeding_dev) {
2048                 ret = init_first_rw_device(trans, root, device);
2049                 if (ret) {
2050                         btrfs_abort_transaction(trans, root, ret);
2051                         goto error_trans;
2052                 }
2053                 ret = btrfs_finish_sprout(trans, root);
2054                 if (ret) {
2055                         btrfs_abort_transaction(trans, root, ret);
2056                         goto error_trans;
2057                 }
2058         } else {
2059                 ret = btrfs_add_device(trans, root, device);
2060                 if (ret) {
2061                         btrfs_abort_transaction(trans, root, ret);
2062                         goto error_trans;
2063                 }
2064         }
2065
2066         /*
2067          * we've got more storage, clear any full flags on the space
2068          * infos
2069          */
2070         btrfs_clear_space_info_full(root->fs_info);
2071
2072         unlock_chunks(root);
2073         root->fs_info->num_tolerated_disk_barrier_failures =
2074                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2075         ret = btrfs_commit_transaction(trans, root);
2076
2077         if (seeding_dev) {
2078                 mutex_unlock(&uuid_mutex);
2079                 up_write(&sb->s_umount);
2080
2081                 if (ret) /* transaction commit */
2082                         return ret;
2083
2084                 ret = btrfs_relocate_sys_chunks(root);
2085                 if (ret < 0)
2086                         btrfs_error(root->fs_info, ret,
2087                                     "Failed to relocate sys chunks after "
2088                                     "device initialization. This can be fixed "
2089                                     "using the \"btrfs balance\" command.");
2090                 trans = btrfs_attach_transaction(root);
2091                 if (IS_ERR(trans)) {
2092                         if (PTR_ERR(trans) == -ENOENT)
2093                                 return 0;
2094                         return PTR_ERR(trans);
2095                 }
2096                 ret = btrfs_commit_transaction(trans, root);
2097         }
2098
2099         return ret;
2100
2101 error_trans:
2102         unlock_chunks(root);
2103         btrfs_end_transaction(trans, root);
2104         rcu_string_free(device->name);
2105         kfree(device);
2106 error:
2107         blkdev_put(bdev, FMODE_EXCL);
2108         if (seeding_dev) {
2109                 mutex_unlock(&uuid_mutex);
2110                 up_write(&sb->s_umount);
2111         }
2112         return ret;
2113 }
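/*
 * Editor's sketch (userspace): btrfs_init_new_device() is the backend of
 * the BTRFS_IOC_ADD_DEV ioctl and takes the same struct
 * btrfs_ioctl_vol_args as the removal sketch after btrfs_rm_device()
 * above; only the request code differs.  Assuming <linux/btrfs.h>:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int add_device(int fd, const char *path)	/* fd: any file on the fs */
{
	struct btrfs_ioctl_vol_args args;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, path, BTRFS_PATH_NAME_MAX);
	return ioctl(fd, BTRFS_IOC_ADD_DEV, &args);
}
#endif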
2114
2115 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2116                                   struct btrfs_device **device_out)
2117 {
2118         struct request_queue *q;
2119         struct btrfs_device *device;
2120         struct block_device *bdev;
2121         struct btrfs_fs_info *fs_info = root->fs_info;
2122         struct list_head *devices;
2123         struct rcu_string *name;
2124         int ret = 0;
2125
2126         *device_out = NULL;
2127         if (fs_info->fs_devices->seeding)
2128                 return -EINVAL;
2129
2130         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2131                                   fs_info->bdev_holder);
2132         if (IS_ERR(bdev))
2133                 return PTR_ERR(bdev);
2134
2135         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2136
2137         devices = &fs_info->fs_devices->devices;
2138         list_for_each_entry(device, devices, dev_list) {
2139                 if (device->bdev == bdev) {
2140                         ret = -EEXIST;
2141                         goto error;
2142                 }
2143         }
2144
2145         device = kzalloc(sizeof(*device), GFP_NOFS);
2146         if (!device) {
2147                 ret = -ENOMEM;
2148                 goto error;
2149         }
2150
2151         name = rcu_string_strdup(device_path, GFP_NOFS);
2152         if (!name) {
2153                 kfree(device);
2154                 ret = -ENOMEM;
2155                 goto error;
2156         }
2157         rcu_assign_pointer(device->name, name);
2158
2159         q = bdev_get_queue(bdev);
2160         if (blk_queue_discard(q))
2161                 device->can_discard = 1;
2162         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2163         device->writeable = 1;
2164         device->work.func = pending_bios_fn;
2165         generate_random_uuid(device->uuid);
2166         device->devid = BTRFS_DEV_REPLACE_DEVID;
2167         spin_lock_init(&device->io_lock);
2168         device->generation = 0;
2169         device->io_width = root->sectorsize;
2170         device->io_align = root->sectorsize;
2171         device->sector_size = root->sectorsize;
2172         device->total_bytes = i_size_read(bdev->bd_inode);
2173         device->disk_total_bytes = device->total_bytes;
2174         device->dev_root = fs_info->dev_root;
2175         device->bdev = bdev;
2176         device->in_fs_metadata = 1;
2177         device->is_tgtdev_for_dev_replace = 1;
2178         device->mode = FMODE_EXCL;
2179         set_blocksize(device->bdev, 4096);
2180         device->fs_devices = fs_info->fs_devices;
2181         list_add(&device->dev_list, &fs_info->fs_devices->devices);
2182         fs_info->fs_devices->num_devices++;
2183         fs_info->fs_devices->open_devices++;
2184         if (device->can_discard)
2185                 fs_info->fs_devices->num_can_discard++;
2186         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2187
2188         *device_out = device;
2189         return ret;
2190
2191 error:
2192         blkdev_put(bdev, FMODE_EXCL);
2193         return ret;
2194 }
2195
2196 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2197                                               struct btrfs_device *tgtdev)
2198 {
2199         WARN_ON(fs_info->fs_devices->rw_devices == 0);
2200         tgtdev->io_width = fs_info->dev_root->sectorsize;
2201         tgtdev->io_align = fs_info->dev_root->sectorsize;
2202         tgtdev->sector_size = fs_info->dev_root->sectorsize;
2203         tgtdev->dev_root = fs_info->dev_root;
2204         tgtdev->in_fs_metadata = 1;
2205 }
2206
2207 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2208                                         struct btrfs_device *device)
2209 {
2210         int ret;
2211         struct btrfs_path *path;
2212         struct btrfs_root *root;
2213         struct btrfs_dev_item *dev_item;
2214         struct extent_buffer *leaf;
2215         struct btrfs_key key;
2216
2217         root = device->dev_root->fs_info->chunk_root;
2218
2219         path = btrfs_alloc_path();
2220         if (!path)
2221                 return -ENOMEM;
2222
2223         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2224         key.type = BTRFS_DEV_ITEM_KEY;
2225         key.offset = device->devid;
2226
2227         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2228         if (ret < 0)
2229                 goto out;
2230
2231         if (ret > 0) {
2232                 ret = -ENOENT;
2233                 goto out;
2234         }
2235
2236         leaf = path->nodes[0];
2237         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2238
2239         btrfs_set_device_id(leaf, dev_item, device->devid);
2240         btrfs_set_device_type(leaf, dev_item, device->type);
2241         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2242         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2243         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2244         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2245         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2246         btrfs_mark_buffer_dirty(leaf);
2247
2248 out:
2249         btrfs_free_path(path);
2250         return ret;
2251 }
2252
2253 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2254                       struct btrfs_device *device, u64 new_size)
2255 {
2256         struct btrfs_super_block *super_copy =
2257                 device->dev_root->fs_info->super_copy;
2258         u64 old_total = btrfs_super_total_bytes(super_copy);
2259         u64 diff = new_size - device->total_bytes;
2260
2261         if (!device->writeable)
2262                 return -EACCES;
2263         if (new_size <= device->total_bytes ||
2264             device->is_tgtdev_for_dev_replace)
2265                 return -EINVAL;
2266
2267         btrfs_set_super_total_bytes(super_copy, old_total + diff);
2268         device->fs_devices->total_rw_bytes += diff;
2269
2270         device->total_bytes = new_size;
2271         device->disk_total_bytes = new_size;
2272         btrfs_clear_space_info_full(device->dev_root->fs_info);
2273
2274         return btrfs_update_device(trans, device);
2275 }
2276
2277 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2278                       struct btrfs_device *device, u64 new_size)
2279 {
2280         int ret;
2281         lock_chunks(device->dev_root);
2282         ret = __btrfs_grow_device(trans, device, new_size);
2283         unlock_chunks(device->dev_root);
2284         return ret;
2285 }
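/*
 * Editor's note: btrfs_grow_device()/__btrfs_grow_device() are driven by
 * the BTRFS_IOC_RESIZE ioctl ("btrfs filesystem resize").  Its size
 * argument is a string parsed in ioctl.c, e.g. "max", "+10g" or, on
 * multi-device filesystems, "<devid>:<size>" (examples, not an
 * exhaustive list).
 */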
2286
2287 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2288                             struct btrfs_root *root,
2289                             u64 chunk_tree, u64 chunk_objectid,
2290                             u64 chunk_offset)
2291 {
2292         int ret;
2293         struct btrfs_path *path;
2294         struct btrfs_key key;
2295
2296         root = root->fs_info->chunk_root;
2297         path = btrfs_alloc_path();
2298         if (!path)
2299                 return -ENOMEM;
2300
2301         key.objectid = chunk_objectid;
2302         key.offset = chunk_offset;
2303         key.type = BTRFS_CHUNK_ITEM_KEY;
2304
2305         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2306         if (ret < 0)
2307                 goto out;
2308         else if (ret > 0) { /* Logic error or corruption */
2309                 btrfs_error(root->fs_info, -ENOENT,
2310                             "Failed lookup while freeing chunk.");
2311                 ret = -ENOENT;
2312                 goto out;
2313         }
2314
2315         ret = btrfs_del_item(trans, root, path);
2316         if (ret < 0)
2317                 btrfs_error(root->fs_info, ret,
2318                             "Failed to delete chunk item.");
2319 out:
2320         btrfs_free_path(path);
2321         return ret;
2322 }
2323
2324 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2325                         chunk_offset)
2326 {
2327         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2328         struct btrfs_disk_key *disk_key;
2329         struct btrfs_chunk *chunk;
2330         u8 *ptr;
2331         int ret = 0;
2332         u32 num_stripes;
2333         u32 array_size;
2334         u32 len = 0;
2335         u32 cur;
2336         struct btrfs_key key;
2337
2338         array_size = btrfs_super_sys_array_size(super_copy);
2339
2340         ptr = super_copy->sys_chunk_array;
2341         cur = 0;
2342
2343         while (cur < array_size) {
2344                 disk_key = (struct btrfs_disk_key *)ptr;
2345                 btrfs_disk_key_to_cpu(&key, disk_key);
2346
2347                 len = sizeof(*disk_key);
2348
2349                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2350                         chunk = (struct btrfs_chunk *)(ptr + len);
2351                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2352                         len += btrfs_chunk_item_size(num_stripes);
2353                 } else {
2354                         ret = -EIO;
2355                         break;
2356                 }
2357                 if (key.objectid == chunk_objectid &&
2358                     key.offset == chunk_offset) {
2359                         memmove(ptr, ptr + len, array_size - (cur + len));
2360                         array_size -= len;
2361                         btrfs_set_super_sys_array_size(super_copy, array_size);
2362                 } else {
2363                         ptr += len;
2364                         cur += len;
2365                 }
2366         }
2367         return ret;
2368 }
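/*
 * Editor's sketch: the array walked above is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) records, where each chunk
 * embeds its first stripe and num_stripes - 1 further stripes follow it.
 * The record body length computed via btrfs_chunk_item_size() is
 * therefore, assuming the on-disk definitions in ctree.h:
 */
#if 0
static unsigned long sys_chunk_record_size(u32 num_stripes)
{
	return sizeof(struct btrfs_disk_key) +
	       sizeof(struct btrfs_chunk) +	/* includes the 1st stripe */
	       sizeof(struct btrfs_stripe) * (num_stripes - 1);
}
#endif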
2369
2370 static int btrfs_relocate_chunk(struct btrfs_root *root,
2371                          u64 chunk_tree, u64 chunk_objectid,
2372                          u64 chunk_offset)
2373 {
2374         struct extent_map_tree *em_tree;
2375         struct btrfs_root *extent_root;
2376         struct btrfs_trans_handle *trans;
2377         struct extent_map *em;
2378         struct map_lookup *map;
2379         int ret;
2380         int i;
2381
2382         root = root->fs_info->chunk_root;
2383         extent_root = root->fs_info->extent_root;
2384         em_tree = &root->fs_info->mapping_tree.map_tree;
2385
2386         ret = btrfs_can_relocate(extent_root, chunk_offset);
2387         if (ret)
2388                 return -ENOSPC;
2389
2390         /* step one, relocate all the extents inside this chunk */
2391         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2392         if (ret)
2393                 return ret;
2394
2395         trans = btrfs_start_transaction(root, 0);
2396         if (IS_ERR(trans)) {
2397                 ret = PTR_ERR(trans);
2398                 btrfs_std_error(root->fs_info, ret);
2399                 return ret;
2400         }
2401
2402         lock_chunks(root);
2403
2404         /*
2405          * step two, delete the device extents and the
2406          * chunk tree entries
2407          */
2408         read_lock(&em_tree->lock);
2409         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2410         read_unlock(&em_tree->lock);
2411
2412         BUG_ON(!em || em->start > chunk_offset ||
2413                em->start + em->len < chunk_offset);
2414         map = (struct map_lookup *)em->bdev;
2415
2416         for (i = 0; i < map->num_stripes; i++) {
2417                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2418                                             map->stripes[i].physical);
2419                 BUG_ON(ret);
2420
2421                 if (map->stripes[i].dev) {
2422                         ret = btrfs_update_device(trans, map->stripes[i].dev);
2423                         BUG_ON(ret);
2424                 }
2425         }
2426         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2427                                chunk_offset);
2428
2429         BUG_ON(ret);
2430
2431         trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2432
2433         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2434                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2435                 BUG_ON(ret);
2436         }
2437
2438         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2439         BUG_ON(ret);
2440
2441         write_lock(&em_tree->lock);
2442         remove_extent_mapping(em_tree, em);
2443         write_unlock(&em_tree->lock);
2444
2445         kfree(map);
2446         em->bdev = NULL;
2447
2448         /* once for the tree */
2449         free_extent_map(em);
2450         /* once for us */
2451         free_extent_map(em);
2452
2453         unlock_chunks(root);
2454         btrfs_end_transaction(trans, root);
2455         return 0;
2456 }
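/*
 * Editor's note: the BUG_ON()s above encode an invariant rather than
 * error handling.  Once step one has emptied the chunk, the device
 * extents, the chunk item, the sys_chunk_array copy and the block group
 * must all disappear within one transaction; failing halfway would leave
 * the chunk mapping half-deleted with no way to roll back.
 */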
2457
2458 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2459 {
2460         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2461         struct btrfs_path *path;
2462         struct extent_buffer *leaf;
2463         struct btrfs_chunk *chunk;
2464         struct btrfs_key key;
2465         struct btrfs_key found_key;
2466         u64 chunk_tree = chunk_root->root_key.objectid;
2467         u64 chunk_type;
2468         bool retried = false;
2469         int failed = 0;
2470         int ret;
2471
2472         path = btrfs_alloc_path();
2473         if (!path)
2474                 return -ENOMEM;
2475
2476 again:
2477         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2478         key.offset = (u64)-1;
2479         key.type = BTRFS_CHUNK_ITEM_KEY;
2480
2481         while (1) {
2482                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2483                 if (ret < 0)
2484                         goto error;
2485                 BUG_ON(ret == 0); /* Corruption */
2486
2487                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2488                                           key.type);
2489                 if (ret < 0)
2490                         goto error;
2491                 if (ret > 0)
2492                         break;
2493
2494                 leaf = path->nodes[0];
2495                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2496
2497                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2498                                        struct btrfs_chunk);
2499                 chunk_type = btrfs_chunk_type(leaf, chunk);
2500                 btrfs_release_path(path);
2501
2502                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2503                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2504                                                    found_key.objectid,
2505                                                    found_key.offset);
2506                         if (ret == -ENOSPC)
2507                                 failed++;
2508                         else if (ret)
2509                                 BUG();
2510                 }
2511
2512                 if (found_key.offset == 0)
2513                         break;
2514                 key.offset = found_key.offset - 1;
2515         }
2516         ret = 0;
2517         if (failed && !retried) {
2518                 failed = 0;
2519                 retried = true;
2520                 goto again;
2521         } else if (failed && retried) {
2522                 WARN_ON(1);
2523                 ret = -ENOSPC;
2524         }
2525 error:
2526         btrfs_free_path(path);
2527         return ret;
2528 }
2529
2530 static int insert_balance_item(struct btrfs_root *root,
2531                                struct btrfs_balance_control *bctl)
2532 {
2533         struct btrfs_trans_handle *trans;
2534         struct btrfs_balance_item *item;
2535         struct btrfs_disk_balance_args disk_bargs;
2536         struct btrfs_path *path;
2537         struct extent_buffer *leaf;
2538         struct btrfs_key key;
2539         int ret, err;
2540
2541         path = btrfs_alloc_path();
2542         if (!path)
2543                 return -ENOMEM;
2544
2545         trans = btrfs_start_transaction(root, 0);
2546         if (IS_ERR(trans)) {
2547                 btrfs_free_path(path);
2548                 return PTR_ERR(trans);
2549         }
2550
2551         key.objectid = BTRFS_BALANCE_OBJECTID;
2552         key.type = BTRFS_BALANCE_ITEM_KEY;
2553         key.offset = 0;
2554
2555         ret = btrfs_insert_empty_item(trans, root, path, &key,
2556                                       sizeof(*item));
2557         if (ret)
2558                 goto out;
2559
2560         leaf = path->nodes[0];
2561         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2562
2563         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2564
2565         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2566         btrfs_set_balance_data(leaf, item, &disk_bargs);
2567         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2568         btrfs_set_balance_meta(leaf, item, &disk_bargs);
2569         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2570         btrfs_set_balance_sys(leaf, item, &disk_bargs);
2571
2572         btrfs_set_balance_flags(leaf, item, bctl->flags);
2573
2574         btrfs_mark_buffer_dirty(leaf);
2575 out:
2576         btrfs_free_path(path);
2577         err = btrfs_commit_transaction(trans, root);
2578         if (err && !ret)
2579                 ret = err;
2580         return ret;
2581 }
2582
2583 static int del_balance_item(struct btrfs_root *root)
2584 {
2585         struct btrfs_trans_handle *trans;
2586         struct btrfs_path *path;
2587         struct btrfs_key key;
2588         int ret, err;
2589
2590         path = btrfs_alloc_path();
2591         if (!path)
2592                 return -ENOMEM;
2593
2594         trans = btrfs_start_transaction(root, 0);
2595         if (IS_ERR(trans)) {
2596                 btrfs_free_path(path);
2597                 return PTR_ERR(trans);
2598         }
2599
2600         key.objectid = BTRFS_BALANCE_OBJECTID;
2601         key.type = BTRFS_BALANCE_ITEM_KEY;
2602         key.offset = 0;
2603
2604         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2605         if (ret < 0)
2606                 goto out;
2607         if (ret > 0) {
2608                 ret = -ENOENT;
2609                 goto out;
2610         }
2611
2612         ret = btrfs_del_item(trans, root, path);
2613 out:
2614         btrfs_free_path(path);
2615         err = btrfs_commit_transaction(trans, root);
2616         if (err && !ret)
2617                 ret = err;
2618         return ret;
2619 }
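/*
 * Editor's note: insert_balance_item()/del_balance_item() persist the
 * balance state under the BTRFS_BALANCE_OBJECTID key so that a balance
 * interrupted by a crash or unmount can be found again at mount time
 * (btrfs_recover_balance()) and resumed with the tightened filters that
 * update_balance_args() below applies.
 */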
2620
2621 /*
2622  * This is a heuristic used to reduce the number of chunks balanced on
2623  * resume after balance was interrupted.
2624  */
2625 static void update_balance_args(struct btrfs_balance_control *bctl)
2626 {
2627         /*
2628          * Turn on soft mode for chunk types that were being converted.
2629          */
2630         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2631                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2632         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2633                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2634         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2635                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2636
2637         /*
2638          * Turn on usage filter if it is not already used.  The idea is
2639          * that chunks that we have already balanced should be
2640          * reasonably full.  Don't do it for chunks that are being
2641          * converted - that will keep us from relocating unconverted
2642          * (albeit full) chunks.
2643          */
2644         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2645             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2646                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2647                 bctl->data.usage = 90;
2648         }
2649         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2650             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2651                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2652                 bctl->sys.usage = 90;
2653         }
2654         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2655             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2656                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2657                 bctl->meta.usage = 90;
2658         }
2659 }
2660
2661 /*
2662  * Should be called with both balance and volume mutexes held to
2663  * serialize other volume operations (add_dev/rm_dev/resize) with
2664  * restriper.  Same goes for unset_balance_control.
2665  */
2666 static void set_balance_control(struct btrfs_balance_control *bctl)
2667 {
2668         struct btrfs_fs_info *fs_info = bctl->fs_info;
2669
2670         BUG_ON(fs_info->balance_ctl);
2671
2672         spin_lock(&fs_info->balance_lock);
2673         fs_info->balance_ctl = bctl;
2674         spin_unlock(&fs_info->balance_lock);
2675 }
2676
2677 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2678 {
2679         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2680
2681         BUG_ON(!fs_info->balance_ctl);
2682
2683         spin_lock(&fs_info->balance_lock);
2684         fs_info->balance_ctl = NULL;
2685         spin_unlock(&fs_info->balance_lock);
2686
2687         kfree(bctl);
2688 }
2689
2690 /*
2691  * Balance filters.  Return 1 if chunk should be filtered out
2692  * (should not be balanced).
2693  */
2694 static int chunk_profiles_filter(u64 chunk_type,
2695                                  struct btrfs_balance_args *bargs)
2696 {
2697         chunk_type = chunk_to_extended(chunk_type) &
2698                                 BTRFS_EXTENDED_PROFILE_MASK;
2699
2700         if (bargs->profiles & chunk_type)
2701                 return 0;
2702
2703         return 1;
2704 }
2705
2706 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2707                               struct btrfs_balance_args *bargs)
2708 {
2709         struct btrfs_block_group_cache *cache;
2710         u64 chunk_used, user_thresh;
2711         int ret = 1;
2712
2713         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2714         chunk_used = btrfs_block_group_used(&cache->item);
2715
2716         if (bargs->usage == 0)
2717                 user_thresh = 1;
2718         else if (bargs->usage > 100)
2719                 user_thresh = cache->key.offset;
2720         else
2721                 user_thresh = div_factor_fine(cache->key.offset,
2722                                               bargs->usage);
2723
2724         if (chunk_used < user_thresh)
2725                 ret = 0;
2726
2727         btrfs_put_block_group(cache);
2728         return ret;
2729 }
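/*
 * Editor's sketch: assuming div_factor_fine() from math.h computes
 * num * factor / 100, the usage filter keeps a chunk for balancing only
 * while its used bytes are strictly below this threshold (the kernel
 * code uses do_div() for the 64-bit division):
 */
#if 0
static u64 usage_threshold(u64 chunk_size, u64 usage)
{
	if (usage == 0)
		return 1;		/* only completely empty chunks */
	if (usage > 100)
		return chunk_size;	/* every chunk qualifies */
	return chunk_size * usage / 100;
}
/* e.g. usage=90 keeps a 1 GiB chunk while less than 90% of it is used */
#endif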
2730
2731 static int chunk_devid_filter(struct extent_buffer *leaf,
2732                               struct btrfs_chunk *chunk,
2733                               struct btrfs_balance_args *bargs)
2734 {
2735         struct btrfs_stripe *stripe;
2736         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2737         int i;
2738
2739         for (i = 0; i < num_stripes; i++) {
2740                 stripe = btrfs_stripe_nr(chunk, i);
2741                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2742                         return 0;
2743         }
2744
2745         return 1;
2746 }
2747
2748 /* [pstart, pend) */
2749 static int chunk_drange_filter(struct extent_buffer *leaf,
2750                                struct btrfs_chunk *chunk,
2751                                u64 chunk_offset,
2752                                struct btrfs_balance_args *bargs)
2753 {
2754         struct btrfs_stripe *stripe;
2755         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2756         u64 stripe_offset;
2757         u64 stripe_length;
2758         int factor;
2759         int i;
2760
2761         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2762                 return 0;
2763
2764         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2765              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2766                 factor = num_stripes / 2;
2767         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2768                 factor = num_stripes - 1;
2769         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2770                 factor = num_stripes - 2;
2771         } else {
2772                 factor = num_stripes;
2773         }
2774
2775         for (i = 0; i < num_stripes; i++) {
2776                 stripe = btrfs_stripe_nr(chunk, i);
2777                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2778                         continue;
2779
2780                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2781                 stripe_length = btrfs_chunk_length(leaf, chunk);
2782                 do_div(stripe_length, factor);
2783
2784                 if (stripe_offset < bargs->pend &&
2785                     stripe_offset + stripe_length > bargs->pstart)
2786                         return 0;
2787         }
2788
2789         return 1;
2790 }
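/*
 * Editor's sketch of the per-stripe length used above: a chunk of length
 * L spread over N stripes stores L / factor bytes in each stripe, where
 * the factor reflects the redundancy of the profile (illustrative only):
 */
#if 0
static u64 stripe_len_on_disk(u64 chunk_len, int num_stripes, u64 type)
{
	int factor = num_stripes;			/* raid0 / single */

	if (type & (BTRFS_BLOCK_GROUP_DUP |
		    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = num_stripes / 2;		/* two copies */
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		factor = num_stripes - 1;		/* one parity stripe */
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		factor = num_stripes - 2;		/* two parity stripes */

	return chunk_len / factor;
}
#endif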
2791
2792 /* [vstart, vend) */
2793 static int chunk_vrange_filter(struct extent_buffer *leaf,
2794                                struct btrfs_chunk *chunk,
2795                                u64 chunk_offset,
2796                                struct btrfs_balance_args *bargs)
2797 {
2798         if (chunk_offset < bargs->vend &&
2799             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2800                 /* at least part of the chunk is inside this vrange */
2801                 return 0;
2802
2803         return 1;
2804 }
2805
2806 static int chunk_soft_convert_filter(u64 chunk_type,
2807                                      struct btrfs_balance_args *bargs)
2808 {
2809         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2810                 return 0;
2811
2812         chunk_type = chunk_to_extended(chunk_type) &
2813                                 BTRFS_EXTENDED_PROFILE_MASK;
2814
2815         if (bargs->target == chunk_type)
2816                 return 1;
2817
2818         return 0;
2819 }
2820
2821 static int should_balance_chunk(struct btrfs_root *root,
2822                                 struct extent_buffer *leaf,
2823                                 struct btrfs_chunk *chunk, u64 chunk_offset)
2824 {
2825         struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2826         struct btrfs_balance_args *bargs = NULL;
2827         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2828
2829         /* type filter */
2830         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2831               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2832                 return 0;
2833         }
2834
2835         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2836                 bargs = &bctl->data;
2837         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2838                 bargs = &bctl->sys;
2839         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2840                 bargs = &bctl->meta;
2841
2842         /* profiles filter */
2843         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2844             chunk_profiles_filter(chunk_type, bargs)) {
2845                 return 0;
2846         }
2847
2848         /* usage filter */
2849         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2850             chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2851                 return 0;
2852         }
2853
2854         /* devid filter */
2855         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2856             chunk_devid_filter(leaf, chunk, bargs)) {
2857                 return 0;
2858         }
2859
2860         /* drange filter, makes sense only with devid filter */
2861         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2862             chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2863                 return 0;
2864         }
2865
2866         /* vrange filter */
2867         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2868             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2869                 return 0;
2870         }
2871
2872         /* soft profile changing mode */
2873         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2874             chunk_soft_convert_filter(chunk_type, bargs)) {
2875                 return 0;
2876         }
2877
2878         return 1;
2879 }
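/*
 * Editor's sketch (userspace): the filter chain above is configured
 * through BTRFS_IOC_BALANCE_V2.  Assuming the uapi structures exported
 * by modern kernels' <linux/btrfs.h>, balancing only data chunks that
 * are less than half full would look roughly like:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int balance_half_full_data(int fd)	/* fd: any file on the mounted fs */
{
	struct btrfs_ioctl_balance_args ba;

	memset(&ba, 0, sizeof(ba));
	ba.flags = BTRFS_BALANCE_DATA;			/* only data chunks */
	ba.data.flags = BTRFS_BALANCE_ARGS_USAGE;	/* enable usage filter */
	ba.data.usage = 50;				/* used < 50% passes */
	return ioctl(fd, BTRFS_IOC_BALANCE_V2, &ba);
}
#endif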
2880
2881 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2882 {
2883         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2884         struct btrfs_root *chunk_root = fs_info->chunk_root;
2885         struct btrfs_root *dev_root = fs_info->dev_root;
2886         struct list_head *devices;
2887         struct btrfs_device *device;
2888         u64 old_size;
2889         u64 size_to_free;
2890         struct btrfs_chunk *chunk;
2891         struct btrfs_path *path;
2892         struct btrfs_key key;
2893         struct btrfs_key found_key;
2894         struct btrfs_trans_handle *trans;
2895         struct extent_buffer *leaf;
2896         int slot;
2897         int ret;
2898         int enospc_errors = 0;
2899         bool counting = true;
2900
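     /*
      * Balance makes two passes over the chunk tree: while counting is set
      * we only tally the chunks that match the filters (bctl->stat.expected);
      * on the second pass we actually relocate them and update
      * bctl->stat.completed.
      */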
2901         /* step one, make some room on all the devices */
2902         devices = &fs_info->fs_devices->devices;
2903         list_for_each_entry(device, devices, dev_list) {
2904                 old_size = device->total_bytes;
2905                 size_to_free = div_factor(old_size, 1);
2906                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2907                 if (!device->writeable ||
2908                     device->total_bytes - device->bytes_used > size_to_free ||
2909                     device->is_tgtdev_for_dev_replace)
2910                         continue;
2911
2912                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2913                 if (ret == -ENOSPC)
2914                         break;
2915                 BUG_ON(ret);
2916
2917                 trans = btrfs_start_transaction(dev_root, 0);
2918                 BUG_ON(IS_ERR(trans));
2919
2920                 ret = btrfs_grow_device(trans, device, old_size);
2921                 BUG_ON(ret);
2922
2923                 btrfs_end_transaction(trans, dev_root);
2924         }
2925
2926         /* step two, relocate all the chunks */
2927         path = btrfs_alloc_path();
2928         if (!path) {
2929                 ret = -ENOMEM;
2930                 goto error;
2931         }
2932
2933         /* zero out stat counters */
2934         spin_lock(&fs_info->balance_lock);
2935         memset(&bctl->stat, 0, sizeof(bctl->stat));
2936         spin_unlock(&fs_info->balance_lock);
2937 again:
2938         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2939         key.offset = (u64)-1;
2940         key.type = BTRFS_CHUNK_ITEM_KEY;
2941
2942         while (1) {
2943                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2944                     atomic_read(&fs_info->balance_cancel_req)) {
2945                         ret = -ECANCELED;
2946                         goto error;
2947                 }
2948
2949                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2950                 if (ret < 0)
2951                         goto error;
2952
2953                 /*
2954                  * this shouldn't happen, it means the last relocate
2955                  * failed
2956                  */
2957                 if (ret == 0)
2958                         BUG(); /* FIXME: should this break instead? */
2959
2960                 ret = btrfs_previous_item(chunk_root, path, 0,
2961                                           BTRFS_CHUNK_ITEM_KEY);
2962                 if (ret) {
2963                         ret = 0;
2964                         break;
2965                 }
2966
2967                 leaf = path->nodes[0];
2968                 slot = path->slots[0];
2969                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2970
2971                 if (found_key.objectid != key.objectid)
2972                         break;
2973
2974                 /* chunk zero is special */
2975                 if (found_key.offset == 0)
2976                         break;
2977
2978                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2979
2980                 if (!counting) {
2981                         spin_lock(&fs_info->balance_lock);
2982                         bctl->stat.considered++;
2983                         spin_unlock(&fs_info->balance_lock);
2984                 }
2985
2986                 ret = should_balance_chunk(chunk_root, leaf, chunk,
2987                                            found_key.offset);
2988                 btrfs_release_path(path);
2989                 if (!ret)
2990                         goto loop;
2991
2992                 if (counting) {
2993                         spin_lock(&fs_info->balance_lock);
2994                         bctl->stat.expected++;
2995                         spin_unlock(&fs_info->balance_lock);
2996                         goto loop;
2997                 }
2998
2999                 ret = btrfs_relocate_chunk(chunk_root,
3000                                            chunk_root->root_key.objectid,
3001                                            found_key.objectid,
3002                                            found_key.offset);
3003                 if (ret && ret != -ENOSPC)
3004                         goto error;
3005                 if (ret == -ENOSPC) {
3006                         enospc_errors++;
3007                 } else {
3008                         spin_lock(&fs_info->balance_lock);
3009                         bctl->stat.completed++;
3010                         spin_unlock(&fs_info->balance_lock);
3011                 }
3012 loop:
3013                 key.offset = found_key.offset - 1;
3014         }
3015
3016         if (counting) {
3017                 btrfs_release_path(path);
3018                 counting = false;
3019                 goto again;
3020         }
3021 error:
3022         btrfs_free_path(path);
3023         if (enospc_errors) {
3024                 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3025                        enospc_errors);
3026                 if (!ret)
3027                         ret = -ENOSPC;
3028         }
3029
3030         return ret;
3031 }
3032
3033 /**
3034  * alloc_profile_is_valid - see if a given profile is valid and reduced
3035  * @flags: profile to validate
3036  * @extended: if true @flags is treated as an extended profile
3037  */
3038 static int alloc_profile_is_valid(u64 flags, int extended)
3039 {
3040         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3041                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3042
3043         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3044
3045         /* 1) check that all other bits are zeroed */
3046         if (flags & ~mask)
3047                 return 0;
3048
3049         /* 2) see if profile is reduced */
3050         if (flags == 0)
3051                 return !extended; /* "0" is valid for usual profiles */
3052
3053         /* true if exactly one bit set */
3054         return (flags & (flags - 1)) == 0;
3055 }
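/*
 * Example for the power-of-two test above: BTRFS_BLOCK_GROUP_RAID1 alone
 * passes, while RAID1|DUP has two profile bits set, so
 * (flags & (flags - 1)) != 0 and the profile is rejected as not reduced.
 */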
3056
3057 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3058 {
3059         /* cancel requested || normal exit path */
3060         return atomic_read(&fs_info->balance_cancel_req) ||
3061                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3062                  atomic_read(&fs_info->balance_cancel_req) == 0);
3063 }
3064
3065 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3066 {
3067         int ret;
3068
3069         unset_balance_control(fs_info);
3070         ret = del_balance_item(fs_info->tree_root);
3071         if (ret)
3072                 btrfs_std_error(fs_info, ret);
3073
3074         atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3075 }
3076
3077 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
3078                                struct btrfs_ioctl_balance_args *bargs);
3079
3080 /*
3081  * Should be called with both balance and volume mutexes held
3082  */
3083 int btrfs_balance(struct btrfs_balance_control *bctl,
3084                   struct btrfs_ioctl_balance_args *bargs)
3085 {
3086         struct btrfs_fs_info *fs_info = bctl->fs_info;
3087         u64 allowed;
3088         int mixed = 0;
3089         int ret;
3090         u64 num_devices;
3091         unsigned seq;
3092
3093         if (btrfs_fs_closing(fs_info) ||
3094             atomic_read(&fs_info->balance_pause_req) ||
3095             atomic_read(&fs_info->balance_cancel_req)) {
3096                 ret = -EINVAL;
3097                 goto out;
3098         }
3099
3100         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3101         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3102                 mixed = 1;
3103
3104         /*
3105          * In case of mixed groups, both data and meta should be picked,
3106          * and identical options should be given for both of them.
3107          */
3108         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3109         if (mixed && (bctl->flags & allowed)) {
3110                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3111                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3112                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3113                         printk(KERN_ERR "btrfs: with mixed groups data and "
3114                                "metadata balance options must be the same\n");
3115                         ret = -EINVAL;
3116                         goto out;
3117                 }
3118         }
3119
3120         num_devices = fs_info->fs_devices->num_devices;
3121         btrfs_dev_replace_lock(&fs_info->dev_replace);
3122         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3123                 BUG_ON(num_devices < 1);
3124                 num_devices--;
3125         }
3126         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3127         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3128         if (num_devices == 1)
3129                 allowed |= BTRFS_BLOCK_GROUP_DUP;
3130         else if (num_devices > 1)
3131                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3132         if (num_devices > 2)
3133                 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3134         if (num_devices > 3)
3135                 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3136                             BTRFS_BLOCK_GROUP_RAID6);
3137         if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3138             (!alloc_profile_is_valid(bctl->data.target, 1) ||
3139              (bctl->data.target & ~allowed))) {
3140                 printk(KERN_ERR "btrfs: unable to start balance with target "
3141                        "data profile %llu\n",
3142                        (unsigned long long)bctl->data.target);
3143                 ret = -EINVAL;
3144                 goto out;
3145         }
3146         if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147             (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3148              (bctl->meta.target & ~allowed))) {
3149                 printk(KERN_ERR "btrfs: unable to start balance with target "
3150                        "metadata profile %llu\n",
3151                        (unsigned long long)bctl->meta.target);
3152                 ret = -EINVAL;
3153                 goto out;
3154         }
3155         if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3156             (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3157              (bctl->sys.target & ~allowed))) {
3158                 printk(KERN_ERR "btrfs: unable to start balance with target "
3159                        "system profile %llu\n",
3160                        (unsigned long long)bctl->sys.target);
3161                 ret = -EINVAL;
3162                 goto out;
3163         }
3164
3165         /* allow dup'ed data chunks only in mixed mode */
3166         if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3167             (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3168                 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3169                 ret = -EINVAL;
3170                 goto out;
3171         }
3172
3173         /* allow reducing meta or sys integrity only if force is set */
3174         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3175                         BTRFS_BLOCK_GROUP_RAID10 |
3176                         BTRFS_BLOCK_GROUP_RAID5 |
3177                         BTRFS_BLOCK_GROUP_RAID6;
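     /*
      * profiles_lock is a seqlock: sample the avail_*_alloc_bits under
      * read_seqbegin() and redo the check if read_seqretry() reports a
      * concurrent writer, so we never act on a torn profile snapshot.
      */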
3178         do {
3179                 seq = read_seqbegin(&fs_info->profiles_lock);
3180
3181                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3182                      (fs_info->avail_system_alloc_bits & allowed) &&
3183                      !(bctl->sys.target & allowed)) ||
3184                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3185                      (fs_info->avail_metadata_alloc_bits & allowed) &&
3186                      !(bctl->meta.target & allowed))) {
3187                         if (bctl->flags & BTRFS_BALANCE_FORCE) {
3188                                 printk(KERN_INFO "btrfs: force reducing metadata "
3189                                        "integrity\n");
3190                         } else {
3191                                 printk(KERN_ERR "btrfs: balance will reduce metadata "
3192                                        "integrity, use force if you want this\n");
3193                                 ret = -EINVAL;
3194                                 goto out;
3195                         }
3196                 }
3197         } while (read_seqretry(&fs_info->profiles_lock, seq));
3198
3199         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3200                 int num_tolerated_disk_barrier_failures;
3201                 u64 target = bctl->sys.target;
3202
3203                 num_tolerated_disk_barrier_failures =
3204                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3205                 if (num_tolerated_disk_barrier_failures > 0 &&
3206                     (target &
3207                      (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3208                       BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3209                         num_tolerated_disk_barrier_failures = 0;
3210                 else if (num_tolerated_disk_barrier_failures > 1 &&
3211                          (target &
3212                           (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3213                         num_tolerated_disk_barrier_failures = 1;
3214
3215                 fs_info->num_tolerated_disk_barrier_failures =
3216                         num_tolerated_disk_barrier_failures;
3217         }
3218
3219         ret = insert_balance_item(fs_info->tree_root, bctl);
3220         if (ret && ret != -EEXIST)
3221                 goto out;
3222
3223         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3224                 BUG_ON(ret == -EEXIST);
3225                 set_balance_control(bctl);
3226         } else {
3227                 BUG_ON(ret != -EEXIST);
3228                 spin_lock(&fs_info->balance_lock);
3229                 update_balance_args(bctl);
3230                 spin_unlock(&fs_info->balance_lock);
3231         }
3232
3233         atomic_inc(&fs_info->balance_running);
3234         mutex_unlock(&fs_info->balance_mutex);
3235
3236         ret = __btrfs_balance(fs_info);
3237
3238         mutex_lock(&fs_info->balance_mutex);
3239         atomic_dec(&fs_info->balance_running);
3240
3241         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3242                 fs_info->num_tolerated_disk_barrier_failures =
3243                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3244         }
3245
3246         if (bargs) {
3247                 memset(bargs, 0, sizeof(*bargs));
3248                 update_ioctl_balance_args(fs_info, 0, bargs);
3249         }
3250
3251         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3252             balance_need_close(fs_info)) {
3253                 __cancel_balance(fs_info);
3254         }
3255
3256         wake_up(&fs_info->balance_wait_q);
3257
3258         return ret;
3259 out:
3260         if (bctl->flags & BTRFS_BALANCE_RESUME)
3261                 __cancel_balance(fs_info);
3262         else {
3263                 kfree(bctl);
3264                 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3265         }
3266         return ret;
3267 }
3268
3269 static int balance_kthread(void *data)
3270 {
3271         struct btrfs_fs_info *fs_info = data;
3272         int ret = 0;
3273
3274         mutex_lock(&fs_info->volume_mutex);
3275         mutex_lock(&fs_info->balance_mutex);
3276
3277         if (fs_info->balance_ctl) {
3278                 printk(KERN_INFO "btrfs: continuing balance\n");
3279                 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3280         }
3281
3282         mutex_unlock(&fs_info->balance_mutex);
3283         mutex_unlock(&fs_info->volume_mutex);
3284
3285         return ret;
3286 }
3287
3288 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3289 {
3290         struct task_struct *tsk;
3291
3292         spin_lock(&fs_info->balance_lock);
3293         if (!fs_info->balance_ctl) {
3294                 spin_unlock(&fs_info->balance_lock);
3295                 return 0;
3296         }
3297         spin_unlock(&fs_info->balance_lock);
3298
3299         if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3300                 printk(KERN_INFO "btrfs: force skipping balance\n");
3301                 return 0;
3302         }
3303
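     /* kthread_run() hands back the task or an ERR_PTR(); PTR_RET()
      * folds that into 0 on success or the negative errno. */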
3304         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3305         return PTR_RET(tsk);
3306 }
3307
3308 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3309 {
3310         struct btrfs_balance_control *bctl;
3311         struct btrfs_balance_item *item;
3312         struct btrfs_disk_balance_args disk_bargs;
3313         struct btrfs_path *path;
3314         struct extent_buffer *leaf;
3315         struct btrfs_key key;
3316         int ret;
3317
3318         path = btrfs_alloc_path();
3319         if (!path)
3320                 return -ENOMEM;
3321
3322         key.objectid = BTRFS_BALANCE_OBJECTID;
3323         key.type = BTRFS_BALANCE_ITEM_KEY;
3324         key.offset = 0;
3325
3326         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3327         if (ret < 0)
3328                 goto out;
3329         if (ret > 0) { /* ret = -ENOENT; */
3330                 ret = 0;
3331                 goto out;
3332         }
3333
3334         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3335         if (!bctl) {
3336                 ret = -ENOMEM;
3337                 goto out;
3338         }
3339
3340         leaf = path->nodes[0];
3341         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3342
3343         bctl->fs_info = fs_info;
3344         bctl->flags = btrfs_balance_flags(leaf, item);
3345         bctl->flags |= BTRFS_BALANCE_RESUME;
3346
3347         btrfs_balance_data(leaf, item, &disk_bargs);
3348         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3349         btrfs_balance_meta(leaf, item, &disk_bargs);
3350         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3351         btrfs_balance_sys(leaf, item, &disk_bargs);
3352         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3353
3354         WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3355
3356         mutex_lock(&fs_info->volume_mutex);
3357         mutex_lock(&fs_info->balance_mutex);
3358
3359         set_balance_control(bctl);
3360
3361         mutex_unlock(&fs_info->balance_mutex);
3362         mutex_unlock(&fs_info->volume_mutex);
3363 out:
3364         btrfs_free_path(path);
3365         return ret;
3366 }
3367
3368 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3369 {
3370         int ret = 0;
3371
3372         mutex_lock(&fs_info->balance_mutex);
3373         if (!fs_info->balance_ctl) {
3374                 mutex_unlock(&fs_info->balance_mutex);
3375                 return -ENOTCONN;
3376         }
3377
3378         if (atomic_read(&fs_info->balance_running)) {
3379                 atomic_inc(&fs_info->balance_pause_req);
3380                 mutex_unlock(&fs_info->balance_mutex);
3381
3382                 wait_event(fs_info->balance_wait_q,
3383                            atomic_read(&fs_info->balance_running) == 0);
3384
3385                 mutex_lock(&fs_info->balance_mutex);
3386                 /* we are good with balance_ctl ripped off from under us */
3387                 BUG_ON(atomic_read(&fs_info->balance_running));
3388                 atomic_dec(&fs_info->balance_pause_req);
3389         } else {
3390                 ret = -ENOTCONN;
3391         }
3392
3393         mutex_unlock(&fs_info->balance_mutex);
3394         return ret;
3395 }
3396
3397 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3398 {
3399         mutex_lock(&fs_info->balance_mutex);
3400         if (!fs_info->balance_ctl) {
3401                 mutex_unlock(&fs_info->balance_mutex);
3402                 return -ENOTCONN;
3403         }
3404
3405         atomic_inc(&fs_info->balance_cancel_req);
3406         /*
3407          * if balance is running, just wait and return; the balance
3408          * item is deleted in btrfs_balance() in that case
3409          */
3410         if (atomic_read(&fs_info->balance_running)) {
3411                 mutex_unlock(&fs_info->balance_mutex);
3412                 wait_event(fs_info->balance_wait_q,
3413                            atomic_read(&fs_info->balance_running) == 0);
3414                 mutex_lock(&fs_info->balance_mutex);
3415         } else {
3416                 /* __cancel_balance needs volume_mutex */
3417                 mutex_unlock(&fs_info->balance_mutex);
3418                 mutex_lock(&fs_info->volume_mutex);
3419                 mutex_lock(&fs_info->balance_mutex);
3420
3421                 if (fs_info->balance_ctl)
3422                         __cancel_balance(fs_info);
3423
3424                 mutex_unlock(&fs_info->volume_mutex);
3425         }
3426
3427         BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3428         atomic_dec(&fs_info->balance_cancel_req);
3429         mutex_unlock(&fs_info->balance_mutex);
3430         return 0;
3431 }
3432
3433 /*
3434  * shrinking a device means finding all of the device extents past
3435  * the new size, and then following the back refs to the chunks.
3436  * The chunk relocation code actually frees the device extent.
3437  */
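/*
 * The loop below walks the device extents from the highest offset down
 * (key.offset starts at (u64)-1 and btrfs_previous_item() steps backwards)
 * and relocates every extent that ends beyond new_size; -ENOSPC hits are
 * retried once before the old size is restored and -ENOSPC is returned.
 */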
3438 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3439 {
3440         struct btrfs_trans_handle *trans;
3441         struct btrfs_root *root = device->dev_root;
3442         struct btrfs_dev_extent *dev_extent = NULL;
3443         struct btrfs_path *path;
3444         u64 length;
3445         u64 chunk_tree;
3446         u64 chunk_objectid;
3447         u64 chunk_offset;
3448         int ret;
3449         int slot;
3450         int failed = 0;
3451         bool retried = false;
3452         struct extent_buffer *l;
3453         struct btrfs_key key;
3454         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3455         u64 old_total = btrfs_super_total_bytes(super_copy);
3456         u64 old_size = device->total_bytes;
3457         u64 diff = device->total_bytes - new_size;
3458
3459         if (device->is_tgtdev_for_dev_replace)
3460                 return -EINVAL;
3461
3462         path = btrfs_alloc_path();
3463         if (!path)
3464                 return -ENOMEM;
3465
3466         path->reada = 2;
3467
3468         lock_chunks(root);
3469
3470         device->total_bytes = new_size;
3471         if (device->writeable) {
3472                 device->fs_devices->total_rw_bytes -= diff;
3473                 spin_lock(&root->fs_info->free_chunk_lock);
3474                 root->fs_info->free_chunk_space -= diff;
3475                 spin_unlock(&root->fs_info->free_chunk_lock);
3476         }
3477         unlock_chunks(root);
3478
3479 again:
3480         key.objectid = device->devid;
3481         key.offset = (u64)-1;
3482         key.type = BTRFS_DEV_EXTENT_KEY;
3483
3484         do {
3485                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3486                 if (ret < 0)
3487                         goto done;
3488
3489                 ret = btrfs_previous_item(root, path, 0, key.type);
3490                 if (ret < 0)
3491                         goto done;
3492                 if (ret) {
3493                         ret = 0;
3494                         btrfs_release_path(path);
3495                         break;
3496                 }
3497
3498                 l = path->nodes[0];
3499                 slot = path->slots[0];
3500                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3501
3502                 if (key.objectid != device->devid) {
3503                         btrfs_release_path(path);
3504                         break;
3505                 }
3506
3507                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3508                 length = btrfs_dev_extent_length(l, dev_extent);
3509
3510                 if (key.offset + length <= new_size) {
3511                         btrfs_release_path(path);
3512                         break;
3513                 }
3514
3515                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3516                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3517                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3518                 btrfs_release_path(path);
3519
3520                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3521                                            chunk_offset);
3522                 if (ret && ret != -ENOSPC)
3523                         goto done;
3524                 if (ret == -ENOSPC)
3525                         failed++;
3526         } while (key.offset-- > 0);
3527
3528         if (failed && !retried) {
3529                 failed = 0;
3530                 retried = true;
3531                 goto again;
3532         } else if (failed && retried) {
3533                 ret = -ENOSPC;
3534                 lock_chunks(root);
3535
3536                 device->total_bytes = old_size;
3537                 if (device->writeable)
3538                         device->fs_devices->total_rw_bytes += diff;
3539                 spin_lock(&root->fs_info->free_chunk_lock);
3540                 root->fs_info->free_chunk_space += diff;
3541                 spin_unlock(&root->fs_info->free_chunk_lock);
3542                 unlock_chunks(root);
3543                 goto done;
3544         }
3545
3546         /* Shrinking succeeded, else we would be at "done". */
3547         trans = btrfs_start_transaction(root, 0);
3548         if (IS_ERR(trans)) {
3549                 ret = PTR_ERR(trans);
3550                 goto done;
3551         }
3552
3553         lock_chunks(root);
3554
3555         device->disk_total_bytes = new_size;
3556         /* Now btrfs_update_device() will change the on-disk size. */
3557         ret = btrfs_update_device(trans, device);
3558         if (ret) {
3559                 unlock_chunks(root);
3560                 btrfs_end_transaction(trans, root);
3561                 goto done;
3562         }
3563         WARN_ON(diff > old_total);
3564         btrfs_set_super_total_bytes(super_copy, old_total - diff);
3565         unlock_chunks(root);
3566         btrfs_end_transaction(trans, root);
3567 done:
3568         btrfs_free_path(path);
3569         return ret;
3570 }
3571
3572 static int btrfs_add_system_chunk(struct btrfs_root *root,
3573                            struct btrfs_key *key,
3574                            struct btrfs_chunk *chunk, int item_size)
3575 {
3576         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3577         struct btrfs_disk_key disk_key;
3578         u32 array_size;
3579         u8 *ptr;
3580
3581         array_size = btrfs_super_sys_array_size(super_copy);
3582         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3583                 return -EFBIG;
3584
3585         ptr = super_copy->sys_chunk_array + array_size;
3586         btrfs_cpu_key_to_disk(&disk_key, key);
3587         memcpy(ptr, &disk_key, sizeof(disk_key));
3588         ptr += sizeof(disk_key);
3589         memcpy(ptr, chunk, item_size);
3590         item_size += sizeof(disk_key);
3591         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3592         return 0;
3593 }
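/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk incl. its stripes) pairs; item_size already
 * covers the stripes, so array_size grows by key + item per system chunk.
 */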
3594
3595 /*
3596  * sort the devices in descending order by max_avail, total_avail
3597  */
3598 static int btrfs_cmp_device_info(const void *a, const void *b)
3599 {
3600         const struct btrfs_device_info *di_a = a;
3601         const struct btrfs_device_info *di_b = b;
3602
3603         if (di_a->max_avail > di_b->max_avail)
3604                 return -1;
3605         if (di_a->max_avail < di_b->max_avail)
3606                 return 1;
3607         if (di_a->total_avail > di_b->total_avail)
3608                 return -1;
3609         if (di_a->total_avail < di_b->total_avail)
3610                 return 1;
3611         return 0;
3612 }
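/*
 * Used as the sort() comparator below, so devices_info[0] ends up with the
 * biggest hole and devices_info[ndevs - 1] with the smallest -- which is
 * why stripe_size is later taken from the last (limiting) element.
 */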
3613
3614 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3615         [BTRFS_RAID_RAID10] = {
3616                 .sub_stripes    = 2,
3617                 .dev_stripes    = 1,
3618                 .devs_max       = 0,    /* 0 == as many as possible */
3619                 .devs_min       = 4,
3620                 .devs_increment = 2,
3621                 .ncopies        = 2,
3622         },
3623         [BTRFS_RAID_RAID1] = {
3624                 .sub_stripes    = 1,
3625                 .dev_stripes    = 1,
3626                 .devs_max       = 2,
3627                 .devs_min       = 2,
3628                 .devs_increment = 2,
3629                 .ncopies        = 2,
3630         },
3631         [BTRFS_RAID_DUP] = {
3632                 .sub_stripes    = 1,
3633                 .dev_stripes    = 2,
3634                 .devs_max       = 1,
3635                 .devs_min       = 1,
3636                 .devs_increment = 1,
3637                 .ncopies        = 2,
3638         },
3639         [BTRFS_RAID_RAID0] = {
3640                 .sub_stripes    = 1,
3641                 .dev_stripes    = 1,
3642                 .devs_max       = 0,
3643                 .devs_min       = 2,
3644                 .devs_increment = 1,
3645                 .ncopies        = 1,
3646         },
3647         [BTRFS_RAID_SINGLE] = {
3648                 .sub_stripes    = 1,
3649                 .dev_stripes    = 1,
3650                 .devs_max       = 1,
3651                 .devs_min       = 1,
3652                 .devs_increment = 1,
3653                 .ncopies        = 1,
3654         },
3655         [BTRFS_RAID_RAID5] = {
3656                 .sub_stripes    = 1,
3657                 .dev_stripes    = 1,
3658                 .devs_max       = 0,
3659                 .devs_min       = 2,
3660                 .devs_increment = 1,
3661                 .ncopies        = 2,
3662         },
3663         [BTRFS_RAID_RAID6] = {
3664                 .sub_stripes    = 1,
3665                 .dev_stripes    = 1,
3666                 .devs_max       = 0,
3667                 .devs_min       = 3,
3668                 .devs_increment = 1,
3669                 .ncopies        = 3,
3670         },
3671 };
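/*
 * Worked example: RAID10 has devs_increment = 2 and sub_stripes = 2, so
 * with 5 usable devices ndevs is rounded down to 4, which still satisfies
 * devs_increment * sub_stripes (4) and devs_min (4); the fifth device
 * simply sits out of this chunk.
 */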
3672
3673 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3674 {
3675         /* TODO allow them to set a preferred stripe size */
3676         return 64 * 1024;
3677 }
3678
3679 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3680 {
3681         if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3682                 return;
3683
3684         btrfs_set_fs_incompat(info, RAID56);
3685 }
3686
3687 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3688                                struct btrfs_root *extent_root, u64 start,
3689                                u64 type)
3690 {
3691         struct btrfs_fs_info *info = extent_root->fs_info;
3692         struct btrfs_fs_devices *fs_devices = info->fs_devices;
3693         struct list_head *cur;
3694         struct map_lookup *map = NULL;
3695         struct extent_map_tree *em_tree;
3696         struct extent_map *em;
3697         struct btrfs_device_info *devices_info = NULL;
3698         u64 total_avail;
3699         int num_stripes;        /* total number of stripes to allocate */
3700         int data_stripes;       /* number of stripes that count for
3701                                    block group size */
3702         int sub_stripes;        /* sub_stripes info for map */
3703         int dev_stripes;        /* stripes per dev */
3704         int devs_max;           /* max devs to use */
3705         int devs_min;           /* min devs needed */
3706         int devs_increment;     /* ndevs has to be a multiple of this */
3707         int ncopies;            /* how many copies the data has */
3708         int ret;
3709         u64 max_stripe_size;
3710         u64 max_chunk_size;
3711         u64 stripe_size;
3712         u64 num_bytes;
3713         u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3714         int ndevs;
3715         int i;
3716         int j;
3717         int index;
3718
3719         BUG_ON(!alloc_profile_is_valid(type, 0));
3720
3721         if (list_empty(&fs_devices->alloc_list))
3722                 return -ENOSPC;
3723
3724         index = __get_raid_index(type);
3725
3726         sub_stripes = btrfs_raid_array[index].sub_stripes;
3727         dev_stripes = btrfs_raid_array[index].dev_stripes;
3728         devs_max = btrfs_raid_array[index].devs_max;
3729         devs_min = btrfs_raid_array[index].devs_min;
3730         devs_increment = btrfs_raid_array[index].devs_increment;
3731         ncopies = btrfs_raid_array[index].ncopies;
3732
3733         if (type & BTRFS_BLOCK_GROUP_DATA) {
3734                 max_stripe_size = 1024 * 1024 * 1024;
3735                 max_chunk_size = 10 * max_stripe_size;
3736         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3737                 /* for larger filesystems, use larger metadata chunks */
3738                 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3739                         max_stripe_size = 1024 * 1024 * 1024;
3740                 else
3741                         max_stripe_size = 256 * 1024 * 1024;
3742                 max_chunk_size = max_stripe_size;
3743         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3744                 max_stripe_size = 32 * 1024 * 1024;
3745                 max_chunk_size = 2 * max_stripe_size;
3746         } else {
3747                 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3748                        type);
3749                 BUG_ON(1);
3750         }
3751
3752         /* we don't want a chunk larger than 10% of writeable space */
3753         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3754                              max_chunk_size);
3755
3756         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3757                                GFP_NOFS);
3758         if (!devices_info)
3759                 return -ENOMEM;
3760
3761         cur = fs_devices->alloc_list.next;
3762
3763         /*
3764          * in the first pass through the devices list, we gather information
3765          * about the available holes on each device.
3766          */
3767         ndevs = 0;
3768         while (cur != &fs_devices->alloc_list) {
3769                 struct btrfs_device *device;
3770                 u64 max_avail;
3771                 u64 dev_offset;
3772
3773                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3774
3775                 cur = cur->next;
3776
3777                 if (!device->writeable) {
3778                         WARN(1, KERN_ERR
3779                                "btrfs: read-only device in alloc_list\n");
3780                         continue;
3781                 }
3782
3783                 if (!device->in_fs_metadata ||
3784                     device->is_tgtdev_for_dev_replace)
3785                         continue;
3786
3787                 if (device->total_bytes > device->bytes_used)
3788                         total_avail = device->total_bytes - device->bytes_used;
3789                 else
3790                         total_avail = 0;
3791
3792                 /* If there is no space on this device, skip it. */
3793                 if (total_avail == 0)
3794                         continue;
3795
3796                 ret = find_free_dev_extent(trans, device,
3797                                            max_stripe_size * dev_stripes,
3798                                            &dev_offset, &max_avail);
3799                 if (ret && ret != -ENOSPC)
3800                         goto error;
3801
3802                 if (ret == 0)
3803                         max_avail = max_stripe_size * dev_stripes;
3804
3805                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3806                         continue;
3807
3808                 if (ndevs == fs_devices->rw_devices) {
3809                         WARN(1, "%s: found more than %llu devices\n",
3810                              __func__, fs_devices->rw_devices);
3811                         break;
3812                 }
3813                 devices_info[ndevs].dev_offset = dev_offset;
3814                 devices_info[ndevs].max_avail = max_avail;
3815                 devices_info[ndevs].total_avail = total_avail;
3816                 devices_info[ndevs].dev = device;
3817                 ++ndevs;
3818         }
3819
3820         /*
3821          * now sort the devices by hole size / available space
3822          */
3823         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3824              btrfs_cmp_device_info, NULL);
3825
3826         /* round down to number of usable stripes */
3827         ndevs -= ndevs % devs_increment;
3828
3829         if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3830                 ret = -ENOSPC;
3831                 goto error;
3832         }
3833
3834         if (devs_max && ndevs > devs_max)
3835                 ndevs = devs_max;
3836         /*
3837          * the primary goal is to maximize the number of stripes, so use as many
3838          * devices as possible, even if the stripes are not maximum sized.
3839          */
3840         stripe_size = devices_info[ndevs-1].max_avail;
3841         num_stripes = ndevs * dev_stripes;
3842
3843         /*
3844          * this will have to be fixed for RAID1 and RAID10 over
3845          * more drives
3846          */
3847         data_stripes = num_stripes / ncopies;
3848
3849         if (type & BTRFS_BLOCK_GROUP_RAID5) {
3850                 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
3851                                  btrfs_super_stripesize(info->super_copy));
3852                 data_stripes = num_stripes - 1;
3853         }
3854         if (type & BTRFS_BLOCK_GROUP_RAID6) {
3855                 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
3856                                  btrfs_super_stripesize(info->super_copy));
3857                 data_stripes = num_stripes - 2;
3858         }
3859
3860         /*
3861          * Use the number of data stripes to figure out how big this chunk
3862          * is really going to be in terms of logical address space,
3863          * and compare that answer with the max chunk size
3864          */
3865         if (stripe_size * data_stripes > max_chunk_size) {
3866                 u64 mask = (1ULL << 24) - 1;
3867                 stripe_size = max_chunk_size;
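                 /*
                  * do_div() divides the u64 in place by a 32-bit divisor and
                  * evaluates to the remainder; it is used instead of '/'
                  * because 64-bit division isn't native on 32-bit arches.
                  */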
3868                 do_div(stripe_size, data_stripes);
3869
3870                 /* bump the answer up to a 16MB boundary */
3871                 stripe_size = (stripe_size + mask) & ~mask;
3872
3873                 /* but don't go higher than the limits we found
3874                  * while searching for free extents
3875                  */
3876                 if (stripe_size > devices_info[ndevs-1].max_avail)
3877                         stripe_size = devices_info[ndevs-1].max_avail;
3878         }
3879
3880         do_div(stripe_size, dev_stripes);
3881
3882         /* align to BTRFS_STRIPE_LEN */
3883         do_div(stripe_size, raid_stripe_len);
3884         stripe_size *= raid_stripe_len;
3885
3886         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3887         if (!map) {
3888                 ret = -ENOMEM;
3889                 goto error;
3890         }
3891         map->num_stripes = num_stripes;
3892
3893         for (i = 0; i < ndevs; ++i) {
3894                 for (j = 0; j < dev_stripes; ++j) {
3895                         int s = i * dev_stripes + j;
3896                         map->stripes[s].dev = devices_info[i].dev;
3897                         map->stripes[s].physical = devices_info[i].dev_offset +
3898                                                    j * stripe_size;
3899                 }
3900         }
3901         map->sector_size = extent_root->sectorsize;
3902         map->stripe_len = raid_stripe_len;
3903         map->io_align = raid_stripe_len;
3904         map->io_width = raid_stripe_len;
3905         map->type = type;
3906         map->sub_stripes = sub_stripes;
3907
3908         num_bytes = stripe_size * data_stripes;
3909
3910         trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3911
3912         em = alloc_extent_map();
3913         if (!em) {
3914                 ret = -ENOMEM;
3915                 goto error;
3916         }
3917         em->bdev = (struct block_device *)map;
3918         em->start = start;
3919         em->len = num_bytes;
3920         em->block_start = 0;
3921         em->block_len = em->len;
3922         em->orig_block_len = stripe_size;
3923
3924         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3925         write_lock(&em_tree->lock);
3926         ret = add_extent_mapping(em_tree, em, 0);
3927         if (!ret) {
3928                 list_add_tail(&em->list, &trans->transaction->pending_chunks);
3929                 atomic_inc(&em->refs);
3930         }
3931         write_unlock(&em_tree->lock);
3932         if (ret) {
3933                 free_extent_map(em);
3934                 goto error;
3935         }
3936
3937         ret = btrfs_make_block_group(trans, extent_root, 0, type,
3938                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3939                                      start, num_bytes);
3940         if (ret)
3941                 goto error_del_extent;
3942
3943         free_extent_map(em);
3944         check_raid56_incompat_flag(extent_root->fs_info, type);
3945
3946         kfree(devices_info);
3947         return 0;
3948
3949 error_del_extent:
3950         write_lock(&em_tree->lock);
3951         remove_extent_mapping(em_tree, em);
3952         write_unlock(&em_tree->lock);
3953
3954         /* One for our allocation */
3955         free_extent_map(em);
3956         /* One for the tree reference */
3957         free_extent_map(em);
3958 error:
3959         kfree(map);
3960         kfree(devices_info);
3961         return ret;
3962 }
3963
3964 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
3965                                 struct btrfs_root *extent_root,
3966                                 u64 chunk_offset, u64 chunk_size)
3967 {
3968         struct btrfs_key key;
3969         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3970         struct btrfs_device *device;
3971         struct btrfs_chunk *chunk;
3972         struct btrfs_stripe *stripe;
3973         struct extent_map_tree *em_tree;
3974         struct extent_map *em;
3975         struct map_lookup *map;
3976         size_t item_size;
3977         u64 dev_offset;
3978         u64 stripe_size;
3979         int i = 0;
3980         int ret;
3981
3982         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3983         read_lock(&em_tree->lock);
3984         em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
3985         read_unlock(&em_tree->lock);
3986
3987         if (!em) {
3988                 btrfs_crit(extent_root->fs_info, "unable to find logical "
3989                            "%Lu len %Lu", chunk_offset, chunk_size);
3990                 return -EINVAL;
3991         }
3992
3993         if (em->start != chunk_offset || em->len != chunk_size) {
3994                 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
3995                           " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
3996                           chunk_size, em->start, em->len);
3997                 free_extent_map(em);
3998                 return -EINVAL;
3999         }
4000
4001         map = (struct map_lookup *)em->bdev;
4002         item_size = btrfs_chunk_item_size(map->num_stripes);
4003         stripe_size = em->orig_block_len;
4004
4005         chunk = kzalloc(item_size, GFP_NOFS);
4006         if (!chunk) {
4007                 ret = -ENOMEM;
4008                 goto out;
4009         }
4010
4011         for (i = 0; i < map->num_stripes; i++) {
4012                 device = map->stripes[i].dev;
4013                 dev_offset = map->stripes[i].physical;
4014
4015                 device->bytes_used += stripe_size;
4016                 ret = btrfs_update_device(trans, device);
4017                 if (ret)
4018                         goto out;
4019                 ret = btrfs_alloc_dev_extent(trans, device,
4020                                              chunk_root->root_key.objectid,
4021                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4022                                              chunk_offset, dev_offset,
4023                                              stripe_size);
4024                 if (ret)
4025                         goto out;
4026         }
4027
4028         spin_lock(&extent_root->fs_info->free_chunk_lock);
4029         extent_root->fs_info->free_chunk_space -= (stripe_size *
4030                                                    map->num_stripes);
4031         spin_unlock(&extent_root->fs_info->free_chunk_lock);
4032
4033         stripe = &chunk->stripe;
4034         for (i = 0; i < map->num_stripes; i++) {
4035                 device = map->stripes[i].dev;
4036                 dev_offset = map->stripes[i].physical;
4037
4038                 btrfs_set_stack_stripe_devid(stripe, device->devid);
4039                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4040                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4041                 stripe++;
4042         }
4043
4044         btrfs_set_stack_chunk_length(chunk, chunk_size);
4045         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4046         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4047         btrfs_set_stack_chunk_type(chunk, map->type);
4048         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4049         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4050         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4051         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4052         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4053
4054         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4055         key.type = BTRFS_CHUNK_ITEM_KEY;
4056         key.offset = chunk_offset;
4057
4058         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4059         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4060                 /*
4061                  * TODO: Cleanup of inserted chunk root in case of
4062                  * failure.
4063                  */
4064                 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4065                                              item_size);
4066         }
4067
4068 out:
4069         kfree(chunk);
4070         free_extent_map(em);
4071         return ret;
4072 }
4073
4074 /*
4075  * Chunk allocation falls into two parts. The first part does the work
4076  * that makes the newly allocated chunk usable, but does not do any
4077  * operation that modifies the chunk tree. The second part does the work
4078  * that requires modifying the chunk tree. This division is important for the
4079  * bootstrap process of adding storage to a seed btrfs.
4080  */
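/*
 * Concretely: __btrfs_alloc_chunk() above is part one (it builds the
 * map_lookup, inserts the extent mapping and creates the block group),
 * while btrfs_finish_chunk_alloc() is part two (it updates the devices,
 * allocates the dev extents and inserts the chunk item).
 */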
4081 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4082                       struct btrfs_root *extent_root, u64 type)
4083 {
4084         u64 chunk_offset;
4085
4086         chunk_offset = find_next_chunk(extent_root->fs_info);
4087         return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4088 }
4089
4090 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4091                                          struct btrfs_root *root,
4092                                          struct btrfs_device *device)
4093 {
4094         u64 chunk_offset;
4095         u64 sys_chunk_offset;
4096         u64 alloc_profile;
4097         struct btrfs_fs_info *fs_info = root->fs_info;
4098         struct btrfs_root *extent_root = fs_info->extent_root;
4099         int ret;
4100
4101         chunk_offset = find_next_chunk(fs_info);
4102         alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4103         ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4104                                   alloc_profile);
4105         if (ret)
4106                 return ret;
4107
4108         sys_chunk_offset = find_next_chunk(root->fs_info);
4109         alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4110         ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4111                                   alloc_profile);
4112         if (ret) {
4113                 btrfs_abort_transaction(trans, root, ret);
4114                 goto out;
4115         }
4116
4117         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4118         if (ret)
4119                 btrfs_abort_transaction(trans, root, ret);
4120 out:
4121         return ret;
4122 }
4123
4124 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4125 {
4126         struct extent_map *em;
4127         struct map_lookup *map;
4128         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4129         int readonly = 0;
4130         int i;
4131
4132         read_lock(&map_tree->map_tree.lock);
4133         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4134         read_unlock(&map_tree->map_tree.lock);
4135         if (!em)
4136                 return 1;
4137
4138         if (btrfs_test_opt(root, DEGRADED)) {
4139                 free_extent_map(em);
4140                 return 0;
4141         }
4142
4143         map = (struct map_lookup *)em->bdev;
4144         for (i = 0; i < map->num_stripes; i++) {
4145                 if (!map->stripes[i].dev->writeable) {
4146                         readonly = 1;
4147                         break;
4148                 }
4149         }
4150         free_extent_map(em);
4151         return readonly;
4152 }
4153
4154 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4155 {
4156         extent_map_tree_init(&tree->map_tree);
4157 }
4158
4159 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4160 {
4161         struct extent_map *em;
4162
4163         while (1) {
4164                 write_lock(&tree->map_tree.lock);
4165                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4166                 if (em)
4167                         remove_extent_mapping(&tree->map_tree, em);
4168                 write_unlock(&tree->map_tree.lock);
4169                 if (!em)
4170                         break;
4171                 kfree(em->bdev);
4172                 /* once for us */
4173                 free_extent_map(em);
4174                 /* once for the tree */
4175                 free_extent_map(em);
4176         }
4177 }
4178
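/*
 * Number of independent copies a read of [logical, logical + len) can be
 * served from: num_stripes for DUP/RAID1, sub_stripes for RAID10, 2 for
 * RAID5 (the data or a rebuild from parity), 3 for RAID6, plus one while
 * a dev-replace is running since the replace target carries an extra copy.
 */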
4179 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4180 {
4181         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4182         struct extent_map *em;
4183         struct map_lookup *map;
4184         struct extent_map_tree *em_tree = &map_tree->map_tree;
4185         int ret;
4186
4187         read_lock(&em_tree->lock);
4188         em = lookup_extent_mapping(em_tree, logical, len);
4189         read_unlock(&em_tree->lock);
4190
4191         /*
4192          * We could return errors for these cases, but that could get ugly and
4193          * we'd probably end up doing the same thing anyway (nothing but
4194          * exit), so return 1 so the callers don't try to use other copies.
4195          */
4196         if (!em) {
4197                 btrfs_emerg(fs_info, "No mapping for %Lu-%Lu\n", logical,
4198                             logical+len);
4199                 return 1;
4200         }
4201
4202         if (em->start > logical || em->start + em->len < logical) {
4203                 btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
4204                             "%Lu-%Lu\n", logical, logical+len, em->start,
4205                             em->start + em->len);
                     free_extent_map(em); /* don't leak the lookup reference */
4206                 return 1;
4207         }
4208
4209         map = (struct map_lookup *)em->bdev;
4210         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4211                 ret = map->num_stripes;
4212         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4213                 ret = map->sub_stripes;
4214         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4215                 ret = 2;
4216         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4217                 ret = 3;
4218         else
4219                 ret = 1;
4220         free_extent_map(em);
4221
4222         btrfs_dev_replace_lock(&fs_info->dev_replace);
4223         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4224                 ret++;
4225         btrfs_dev_replace_unlock(&fs_info->dev_replace);
4226
4227         return ret;
4228 }
4229
4230 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4231                                     struct btrfs_mapping_tree *map_tree,
4232                                     u64 logical)
4233 {
4234         struct extent_map *em;
4235         struct map_lookup *map;
4236         struct extent_map_tree *em_tree = &map_tree->map_tree;
4237         unsigned long len = root->sectorsize;
4238
4239         read_lock(&em_tree->lock);
4240         em = lookup_extent_mapping(em_tree, logical, len);
4241         read_unlock(&em_tree->lock);
4242         BUG_ON(!em);
4243
4244         BUG_ON(em->start > logical || em->start + em->len < logical);
4245         map = (struct map_lookup *)em->bdev;
4246         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4247                          BTRFS_BLOCK_GROUP_RAID6)) {
4248                 len = map->stripe_len * nr_data_stripes(map);
4249         }
4250         free_extent_map(em);
4251         return len;
4252 }
4253
4254 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4255                            u64 logical, u64 len, int mirror_num)
4256 {
4257         struct extent_map *em;
4258         struct map_lookup *map;
4259         struct extent_map_tree *em_tree = &map_tree->map_tree;
4260         int ret = 0;
4261
4262         read_lock(&em_tree->lock);
4263         em = lookup_extent_mapping(em_tree, logical, len);
4264         read_unlock(&em_tree->lock);
4265         BUG_ON(!em);
4266
4267         BUG_ON(em->start > logical || em->start + em->len < logical);
4268         map = (struct map_lookup *)em->bdev;
4269         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4270                          BTRFS_BLOCK_GROUP_RAID6))
4271                 ret = 1;
4272         free_extent_map(em);
4273         return ret;
4274 }
4275
4276 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4277                             struct map_lookup *map, int first, int num,
4278                             int optimal, int dev_replace_is_ongoing)
4279 {
4280         int i;
4281         int tolerance;
4282         struct btrfs_device *srcdev;
4283
4284         if (dev_replace_is_ongoing &&
4285             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4286              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4287                 srcdev = fs_info->dev_replace.srcdev;
4288         else
4289                 srcdev = NULL;
4290
4291         /*
4292          * try to avoid the drive that is the source drive for a
4293          * dev-replace procedure; only choose it if no other non-missing
4294          * mirror is available
4295          */
4296         for (tolerance = 0; tolerance < 2; tolerance++) {
4297                 if (map->stripes[optimal].dev->bdev &&
4298                     (tolerance || map->stripes[optimal].dev != srcdev))
4299                         return optimal;
4300                 for (i = first; i < first + num; i++) {
4301                         if (map->stripes[i].dev->bdev &&
4302                             (tolerance || map->stripes[i].dev != srcdev))
4303                                 return i;
4304                 }
4305         }
4306
4307         /* we couldn't find one that doesn't fail.  Just return something
4308          * and the io error handling code will clean up eventually
4309          */
4310         return optimal;
4311 }
4312
4313 static inline int parity_smaller(u64 a, u64 b)
4314 {
4315         return a > b;
4316 }
4317
4318 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4319 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4320 {
4321         struct btrfs_bio_stripe s;
4322         int i;
4323         u64 l;
4324         int again = 1;
4325
4326         while (again) {
4327                 again = 0;
4328                 for (i = 0; i < bbio->num_stripes - 1; i++) {
4329                         if (parity_smaller(raid_map[i], raid_map[i+1])) {
4330                                 s = bbio->stripes[i];
4331                                 l = raid_map[i];
4332                                 bbio->stripes[i] = bbio->stripes[i+1];
4333                                 raid_map[i] = raid_map[i+1];
4334                                 bbio->stripes[i+1] = s;
4335                                 raid_map[i+1] = l;
4336                                 again = 1;
4337                         }
4338                 }
4339         }
4340 }
4341
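/*
 * Map a logical range to the physical stripes that store it.
 *
 * On return, *length is trimmed to what one bio may cover. If @bbio_ret is
 * given, a btrfs_bio describing the target stripes is allocated and, for
 * RAID5/6 writes and recovery, @raid_map_ret receives the logical address
 * of each stripe with the parity stripes sorted last.
 */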
4342 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4343                              u64 logical, u64 *length,
4344                              struct btrfs_bio **bbio_ret,
4345                              int mirror_num, u64 **raid_map_ret)
4346 {
4347         struct extent_map *em;
4348         struct map_lookup *map;
4349         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4350         struct extent_map_tree *em_tree = &map_tree->map_tree;
4351         u64 offset;
4352         u64 stripe_offset;
4353         u64 stripe_end_offset;
4354         u64 stripe_nr;
4355         u64 stripe_nr_orig;
4356         u64 stripe_nr_end;
4357         u64 stripe_len;
4358         u64 *raid_map = NULL;
4359         int stripe_index;
4360         int i;
4361         int ret = 0;
4362         int num_stripes;
4363         int max_errors = 0;
4364         struct btrfs_bio *bbio = NULL;
4365         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4366         int dev_replace_is_ongoing = 0;
4367         int num_alloc_stripes;
4368         int patch_the_first_stripe_for_dev_replace = 0;
4369         u64 physical_to_patch_in_first_stripe = 0;
4370         u64 raid56_full_stripe_start = (u64)-1;
4371
4372         read_lock(&em_tree->lock);
4373         em = lookup_extent_mapping(em_tree, logical, *length);
4374         read_unlock(&em_tree->lock);
4375
4376         if (!em) {
4377                 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4378                         (unsigned long long)logical,
4379                         (unsigned long long)*length);
4380                 return -EINVAL;
4381         }
4382
4383         if (em->start > logical || em->start + em->len < logical) {
4384                 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4385                            "found %Lu-%Lu\n", logical, em->start,
4386                            em->start + em->len);
                /* drop the reference taken by lookup_extent_mapping() */
                free_extent_map(em);
4387                 return -EINVAL;
4388         }
4389
4390         map = (struct map_lookup *)em->bdev;
4391         offset = logical - em->start;
4392
4393         stripe_len = map->stripe_len;
4394         stripe_nr = offset;
4395         /*
4396          * stripe_nr counts the total number of stripes we have to stride
4397          * to get to this block
4398          */
4399         do_div(stripe_nr, stripe_len);
4400
4401         stripe_offset = stripe_nr * stripe_len;
4402         BUG_ON(offset < stripe_offset);
4403
4404         /* stripe_offset is the offset of this block in its stripe */
4405         stripe_offset = offset - stripe_offset;
4406
4407         /* if we're here for raid56, we need to know the stripe aligned start */
4408         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4409                 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4410                 raid56_full_stripe_start = offset;
4411
4412                 /* allow a write of a full stripe, but make sure we don't
4413                  * allow straddling of stripes
4414                  */
4415                 do_div(raid56_full_stripe_start, full_stripe_len);
4416                 raid56_full_stripe_start *= full_stripe_len;
4417         }
4418
4419         if (rw & REQ_DISCARD) {
4420                 /* we don't discard raid56 yet */
4421                 if (map->type &
4422                     (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4423                         ret = -EOPNOTSUPP;
4424                         goto out;
4425                 }
4426                 *length = min_t(u64, em->len - offset, *length);
4427         } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4428                 u64 max_len;
4429                 /* For writes to RAID[56], allow a full stripeset across all disks.
4430                    For other RAID types and for RAID[56] reads, just allow a single
4431                    stripe (on a single disk). */
4432                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4433                     (rw & REQ_WRITE)) {
4434                         max_len = stripe_len * nr_data_stripes(map) -
4435                                 (offset - raid56_full_stripe_start);
4436                 } else {
4437                         /* we limit the length of each bio to what fits in a stripe */
4438                         max_len = stripe_len - stripe_offset;
4439                 }
4440                 *length = min_t(u64, em->len - offset, max_len);
4441         } else {
4442                 *length = em->len - offset;
4443         }
4444
4445         /* This is for when we're called from btrfs_merge_bio_hook() and all
4446            it cares about is the length */
4447         if (!bbio_ret)
4448                 goto out;
4449
4450         btrfs_dev_replace_lock(dev_replace);
4451         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4452         if (!dev_replace_is_ongoing)
4453                 btrfs_dev_replace_unlock(dev_replace);
4454
4455         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4456             !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4457             dev_replace->tgtdev != NULL) {
4458                 /*
4459                  * in dev-replace case, for repair case (that's the only
4460                  * case where the mirror is selected explicitly when
4461                  * calling btrfs_map_block), blocks left of the left cursor
4462                  * can also be read from the target drive.
4463                  * For REQ_GET_READ_MIRRORS, the target drive is added as
4464                  * the last one to the array of stripes. For READ, it also
4465                  * needs to be supported using the same mirror number.
4466                  * If the requested block is not left of the left cursor,
4467                  * EIO is returned. This can happen because btrfs_num_copies()
4468                  * returns one more in the dev-replace case.
4469                  */
4470                 u64 tmp_length = *length;
4471                 struct btrfs_bio *tmp_bbio = NULL;
4472                 int tmp_num_stripes;
4473                 u64 srcdev_devid = dev_replace->srcdev->devid;
4474                 int index_srcdev = 0;
4475                 int found = 0;
4476                 u64 physical_of_found = 0;
4477
4478                 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4479                              logical, &tmp_length, &tmp_bbio, 0, NULL);
4480                 if (ret) {
4481                         WARN_ON(tmp_bbio != NULL);
4482                         goto out;
4483                 }
4484
4485                 tmp_num_stripes = tmp_bbio->num_stripes;
4486                 if (mirror_num > tmp_num_stripes) {
4487                         /*
4488                          * REQ_GET_READ_MIRRORS does not contain this
4489                          * mirror, that means that the requested area
4490                          * is not left of the left cursor
4491                          */
4492                         ret = -EIO;
4493                         kfree(tmp_bbio);
4494                         goto out;
4495                 }
4496
4497                 /*
4498                  * process the rest of the function using the mirror_num
4499                  * of the source drive. Therefore look it up first.
4500                  * At the end, patch the device pointer to the one of the
4501                  * target drive.
4502                  */
4503                 for (i = 0; i < tmp_num_stripes; i++) {
4504                         if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4505                                 /*
4506                                  * In case of DUP, in order to keep it
4507                                  * simple, only add the mirror with the
4508                                  * lowest physical address
4509                                  */
4510                                 if (found &&
4511                                     physical_of_found <=
4512                                      tmp_bbio->stripes[i].physical)
4513                                         continue;
4514                                 index_srcdev = i;
4515                                 found = 1;
4516                                 physical_of_found =
4517                                         tmp_bbio->stripes[i].physical;
4518                         }
4519                 }
4520
4521                 if (found) {
4522                         mirror_num = index_srcdev + 1;
4523                         patch_the_first_stripe_for_dev_replace = 1;
4524                         physical_to_patch_in_first_stripe = physical_of_found;
4525                 } else {
4526                         WARN_ON(1);
4527                         ret = -EIO;
4528                         kfree(tmp_bbio);
4529                         goto out;
4530                 }
4531
4532                 kfree(tmp_bbio);
4533         } else if (mirror_num > map->num_stripes) {
4534                 mirror_num = 0;
4535         }
4536
4537         num_stripes = 1;
4538         stripe_index = 0;
4539         stripe_nr_orig = stripe_nr;
4540         stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4541         do_div(stripe_nr_end, map->stripe_len);
4542         stripe_end_offset = stripe_nr_end * map->stripe_len -
4543                             (offset + *length);
4544
4545         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4546                 if (rw & REQ_DISCARD)
4547                         num_stripes = min_t(u64, map->num_stripes,
4548                                             stripe_nr_end - stripe_nr_orig);
4549                 stripe_index = do_div(stripe_nr, map->num_stripes);
4550         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4551                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4552                         num_stripes = map->num_stripes;
4553                 else if (mirror_num)
4554                         stripe_index = mirror_num - 1;
4555                 else {
4556                         stripe_index = find_live_mirror(fs_info, map, 0,
4557                                             map->num_stripes,
4558                                             current->pid % map->num_stripes,
4559                                             dev_replace_is_ongoing);
4560                         mirror_num = stripe_index + 1;
4561                 }
4562
4563         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4564                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4565                         num_stripes = map->num_stripes;
4566                 } else if (mirror_num) {
4567                         stripe_index = mirror_num - 1;
4568                 } else {
4569                         mirror_num = 1;
4570                 }
4571
4572         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4573                 int factor = map->num_stripes / map->sub_stripes;
4574
4575                 stripe_index = do_div(stripe_nr, factor);
4576                 stripe_index *= map->sub_stripes;
4577
4578                 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4579                         num_stripes = map->sub_stripes;
4580                 else if (rw & REQ_DISCARD)
4581                         num_stripes = min_t(u64, map->sub_stripes *
4582                                             (stripe_nr_end - stripe_nr_orig),
4583                                             map->num_stripes);
4584                 else if (mirror_num)
4585                         stripe_index += mirror_num - 1;
4586                 else {
4587                         int old_stripe_index = stripe_index;
4588                         stripe_index = find_live_mirror(fs_info, map,
4589                                               stripe_index,
4590                                               map->sub_stripes, stripe_index +
4591                                               current->pid % map->sub_stripes,
4592                                               dev_replace_is_ongoing);
4593                         mirror_num = stripe_index - old_stripe_index + 1;
4594                 }
4595
4596         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4597                                 BTRFS_BLOCK_GROUP_RAID6)) {
4598                 u64 tmp;
4599
4600                 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4601                     && raid_map_ret) {
4602                         int i, rot;
4603
4604                         /* push stripe_nr back to the start of the full stripe */
4605                         stripe_nr = raid56_full_stripe_start;
4606                         do_div(stripe_nr, stripe_len);
4607
4608                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4609
4610                         /* RAID[56] write or recovery. Return all stripes */
4611                         num_stripes = map->num_stripes;
4612                         max_errors = nr_parity_stripes(map);
4613
4614                         raid_map = kmalloc(sizeof(u64) * num_stripes,
4615                                            GFP_NOFS);
4616                         if (!raid_map) {
4617                                 ret = -ENOMEM;
4618                                 goto out;
4619                         }
4620
4621                         /* Work out the disk rotation on this stripe-set */
4622                         tmp = stripe_nr;
4623                         rot = do_div(tmp, num_stripes);
4624
4625                         /* Fill in the logical address of each stripe */
4626                         tmp = stripe_nr * nr_data_stripes(map);
4627                         for (i = 0; i < nr_data_stripes(map); i++)
4628                                 raid_map[(i+rot) % num_stripes] =
4629                                         em->start + (tmp + i) * map->stripe_len;
4630
4631                         raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
4632                         if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4633                                 raid_map[(i+rot+1) % num_stripes] =
4634                                         RAID6_Q_STRIPE;
4635
4636                         *length = map->stripe_len;
4637                         stripe_index = 0;
4638                         stripe_offset = 0;
4639                 } else {
4640                         /*
4641                          * Mirror #0 or #1 means the original data block.
4642                          * Mirror #2 is RAID5 parity block.
4643                          * Mirror #3 is RAID6 Q block.
4644                          */
4645                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4646                         if (mirror_num > 1)
4647                                 stripe_index = nr_data_stripes(map) +
4648                                                 mirror_num - 2;
4649
4650                         /* We distribute the parity blocks across stripes */
4651                         tmp = stripe_nr + stripe_index;
4652                         stripe_index = do_div(tmp, map->num_stripes);
4653                 }
4654         } else {
4655                 /*
4656                  * after this do_div call, stripe_nr is the number of stripes
4657                  * on this device we have to walk to find the data, and
4658                  * stripe_index is the number of our device in the stripe array
4659                  */
4660                 stripe_index = do_div(stripe_nr, map->num_stripes);
4661                 mirror_num = stripe_index + 1;
4662         }
4663         BUG_ON(stripe_index >= map->num_stripes);
4664
4665         num_alloc_stripes = num_stripes;
4666         if (dev_replace_is_ongoing) {
4667                 if (rw & (REQ_WRITE | REQ_DISCARD))
4668                         num_alloc_stripes <<= 1;
4669                 if (rw & REQ_GET_READ_MIRRORS)
4670                         num_alloc_stripes++;
4671         }
4672         bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4673         if (!bbio) {
4674                 ret = -ENOMEM;
4675                 goto out;
4676         }
4677         atomic_set(&bbio->error, 0);
4678
4679         if (rw & REQ_DISCARD) {
4680                 int factor = 0;
4681                 int sub_stripes = 0;
4682                 u64 stripes_per_dev = 0;
4683                 u32 remaining_stripes = 0;
4684                 u32 last_stripe = 0;
4685
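                /*
                 * For RAID0/10 discards, work out how many full stripe_len
                 * chunks each device receives (stripes_per_dev) and which
                 * devices get one extra (remaining_stripes), so the
                 * per-stripe lengths can be filled in below.
                 */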
4686                 if (map->type &
4687                     (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4688                         if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4689                                 sub_stripes = 1;
4690                         else
4691                                 sub_stripes = map->sub_stripes;
4692
4693                         factor = map->num_stripes / sub_stripes;
4694                         stripes_per_dev = div_u64_rem(stripe_nr_end -
4695                                                       stripe_nr_orig,
4696                                                       factor,
4697                                                       &remaining_stripes);
4698                         div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4699                         last_stripe *= sub_stripes;
4700                 }
4701
4702                 for (i = 0; i < num_stripes; i++) {
4703                         bbio->stripes[i].physical =
4704                                 map->stripes[stripe_index].physical +
4705                                 stripe_offset + stripe_nr * map->stripe_len;
4706                         bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4707
4708                         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4709                                          BTRFS_BLOCK_GROUP_RAID10)) {
4710                                 bbio->stripes[i].length = stripes_per_dev *
4711                                                           map->stripe_len;
4712
4713                                 if (i / sub_stripes < remaining_stripes)
4714                                         bbio->stripes[i].length +=
4715                                                 map->stripe_len;
4716
4717                                 /*
4718                                  * Special for the first stripe and
4719                                  * the last stripe:
4720                                  *
4721                                  * |-------|...|-------|
4722                                  *     |----------|
4723                                  *    off     end_off
4724                                  */
4725                                 if (i < sub_stripes)
4726                                         bbio->stripes[i].length -=
4727                                                 stripe_offset;
4728
4729                                 if (stripe_index >= last_stripe &&
4730                                     stripe_index <= (last_stripe +
4731                                                      sub_stripes - 1))
4732                                         bbio->stripes[i].length -=
4733                                                 stripe_end_offset;
4734
4735                                 if (i == sub_stripes - 1)
4736                                         stripe_offset = 0;
4737                         } else
4738                                 bbio->stripes[i].length = *length;
4739
4740                         stripe_index++;
4741                         if (stripe_index == map->num_stripes) {
4742                                 /* This could only happen for RAID0/10 */
4743                                 stripe_index = 0;
4744                                 stripe_nr++;
4745                         }
4746                 }
4747         } else {
4748                 for (i = 0; i < num_stripes; i++) {
4749                         bbio->stripes[i].physical =
4750                                 map->stripes[stripe_index].physical +
4751                                 stripe_offset +
4752                                 stripe_nr * map->stripe_len;
4753                         bbio->stripes[i].dev =
4754                                 map->stripes[stripe_index].dev;
4755                         stripe_index++;
4756                 }
4757         }
4758
4759         if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4760                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4761                                  BTRFS_BLOCK_GROUP_RAID10 |
4762                                  BTRFS_BLOCK_GROUP_RAID5 |
4763                                  BTRFS_BLOCK_GROUP_DUP)) {
4764                         max_errors = 1;
4765                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4766                         max_errors = 2;
4767                 }
4768         }
4769
4770         if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4771             dev_replace->tgtdev != NULL) {
4772                 int index_where_to_add;
4773                 u64 srcdev_devid = dev_replace->srcdev->devid;
4774
4775                 /*
4776                  * duplicate the write operations while the dev replace
4777                  * procedure is running. Since the copying of the old disk
4778                  * to the new disk takes place at run time while the
4779                  * filesystem is mounted writable, the regular write
4780                  * operations to the old disk have to be duplicated to go
4781                  * to the new disk as well.
4782                  * Note that device->missing is handled by the caller, and
4783                  * that the write to the old disk is already set up in the
4784                  * stripes array.
4785                  */
4786                 index_where_to_add = num_stripes;
4787                 for (i = 0; i < num_stripes; i++) {
4788                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
4789                                 /* write to new disk, too */
4790                                 struct btrfs_bio_stripe *new =
4791                                         bbio->stripes + index_where_to_add;
4792                                 struct btrfs_bio_stripe *old =
4793                                         bbio->stripes + i;
4794
4795                                 new->physical = old->physical;
4796                                 new->length = old->length;
4797                                 new->dev = dev_replace->tgtdev;
4798                                 index_where_to_add++;
4799                                 max_errors++;
4800                         }
4801                 }
4802                 num_stripes = index_where_to_add;
4803         } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4804                    dev_replace->tgtdev != NULL) {
4805                 u64 srcdev_devid = dev_replace->srcdev->devid;
4806                 int index_srcdev = 0;
4807                 int found = 0;
4808                 u64 physical_of_found = 0;
4809
4810                 /*
4811                  * During the dev-replace procedure, the target drive can
4812                  * also be used to read data in case it is needed to repair
4813                  * a corrupt block elsewhere. This is possible if the
4814                  * requested area is left of the left cursor. In this area,
4815                  * the target drive is a full copy of the source drive.
4816                  */
4817                 for (i = 0; i < num_stripes; i++) {
4818                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
4819                                 /*
4820                                  * In case of DUP, in order to keep it
4821                                  * simple, only add the mirror with the
4822                                  * lowest physical address
4823                                  */
4824                                 if (found &&
4825                                     physical_of_found <=
4826                                      bbio->stripes[i].physical)
4827                                         continue;
4828                                 index_srcdev = i;
4829                                 found = 1;
4830                                 physical_of_found = bbio->stripes[i].physical;
4831                         }
4832                 }
4833                 if (found) {
4834                         u64 length = map->stripe_len;
4835
4836                         if (physical_of_found + length <=
4837                             dev_replace->cursor_left) {
4838                                 struct btrfs_bio_stripe *tgtdev_stripe =
4839                                         bbio->stripes + num_stripes;
4840
4841                                 tgtdev_stripe->physical = physical_of_found;
4842                                 tgtdev_stripe->length =
4843                                         bbio->stripes[index_srcdev].length;
4844                                 tgtdev_stripe->dev = dev_replace->tgtdev;
4845
4846                                 num_stripes++;
4847                         }
4848                 }
4849         }
4850
4851         *bbio_ret = bbio;
4852         bbio->num_stripes = num_stripes;
4853         bbio->max_errors = max_errors;
4854         bbio->mirror_num = mirror_num;
4855
4856         /*
4857          * this is the case that REQ_READ && dev_replace_is_ongoing &&
4858          * mirror_num == num_stripes + 1 && dev_replace target drive is
4859          * available as a mirror
4860          */
4861         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4862                 WARN_ON(num_stripes > 1);
4863                 bbio->stripes[0].dev = dev_replace->tgtdev;
4864                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4865                 bbio->mirror_num = map->num_stripes + 1;
4866         }
4867         if (raid_map) {
4868                 sort_parity_stripes(bbio, raid_map);
4869                 *raid_map_ret = raid_map;
4870         }
4871 out:
4872         if (dev_replace_is_ongoing)
4873                 btrfs_dev_replace_unlock(dev_replace);
4874         free_extent_map(em);
4875         return ret;
4876 }
4877
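/*
 * Convenience wrapper around __btrfs_map_block() for callers that do not
 * need a raid_map. A minimal sketch of a typical call (illustrative only,
 * the variable names are not taken from a real caller):
 *
 *	u64 map_length = length;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &map_length, &bbio, 0);
 *	if (!ret) {
 *		... use bbio->stripes[0 .. bbio->num_stripes - 1] ...
 *		kfree(bbio);
 *	}
 */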
4878 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4879                       u64 logical, u64 *length,
4880                       struct btrfs_bio **bbio_ret, int mirror_num)
4881 {
4882         return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4883                                  mirror_num, NULL);
4884 }
4885
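/*
 * Reverse mapping: given a physical offset on a device inside the chunk at
 * @chunk_start, compute the logical address(es) that map to it. The caller
 * must kfree() the returned @logical array.
 */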
4886 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4887                      u64 chunk_start, u64 physical, u64 devid,
4888                      u64 **logical, int *naddrs, int *stripe_len)
4889 {
4890         struct extent_map_tree *em_tree = &map_tree->map_tree;
4891         struct extent_map *em;
4892         struct map_lookup *map;
4893         u64 *buf;
4894         u64 bytenr;
4895         u64 length;
4896         u64 stripe_nr;
4897         u64 rmap_len;
4898         int i, j, nr = 0;
4899
4900         read_lock(&em_tree->lock);
4901         em = lookup_extent_mapping(em_tree, chunk_start, 1);
4902         read_unlock(&em_tree->lock);
4903
4904         if (!em) {
4905                 printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
4906                        chunk_start);
4907                 return -EIO;
4908         }
4909
4910         if (em->start != chunk_start) {
4911                 printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
4912                        em->start, chunk_start);
4913                 free_extent_map(em);
4914                 return -EIO;
4915         }
4916         map = (struct map_lookup *)em->bdev;
4917
4918         length = em->len;
4919         rmap_len = map->stripe_len;
4920
4921         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4922                 do_div(length, map->num_stripes / map->sub_stripes);
4923         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4924                 do_div(length, map->num_stripes);
4925         else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4926                               BTRFS_BLOCK_GROUP_RAID6)) {
4927                 do_div(length, nr_data_stripes(map));
4928                 rmap_len = map->stripe_len * nr_data_stripes(map);
4929         }
4930
4931         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4932         BUG_ON(!buf); /* -ENOMEM */
4933
4934         for (i = 0; i < map->num_stripes; i++) {
4935                 if (devid && map->stripes[i].dev->devid != devid)
4936                         continue;
4937                 if (map->stripes[i].physical > physical ||
4938                     map->stripes[i].physical + length <= physical)
4939                         continue;
4940
4941                 stripe_nr = physical - map->stripes[i].physical;
4942                 do_div(stripe_nr, map->stripe_len);
4943
4944                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4945                         stripe_nr = stripe_nr * map->num_stripes + i;
4946                         do_div(stripe_nr, map->sub_stripes);
4947                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4948                         stripe_nr = stripe_nr * map->num_stripes + i;
4949                 } /* else if RAID[56], multiply by nr_data_stripes().
4950                    * Alternatively, just use rmap_len below instead of
4951                    * map->stripe_len */
4952
4953                 bytenr = chunk_start + stripe_nr * rmap_len;
4954                 WARN_ON(nr >= map->num_stripes);
4955                 for (j = 0; j < nr; j++) {
4956                         if (buf[j] == bytenr)
4957                                 break;
4958                 }
4959                 if (j == nr) {
4960                         WARN_ON(nr >= map->num_stripes);
4961                         buf[nr++] = bytenr;
4962                 }
4963         }
4964
4965         *logical = buf;
4966         *naddrs = nr;
4967         *stripe_len = rmap_len;
4968
4969         free_extent_map(em);
4970         return 0;
4971 }
4972
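/*
 * Completion callback for the per-stripe bios. Errors are counted in the
 * shared btrfs_bio; when the last stripe bio finishes, the original bio is
 * completed, reporting -EIO only if more stripes failed than the profile
 * tolerates (bbio->max_errors).
 */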
4973 static void btrfs_end_bio(struct bio *bio, int err)
4974 {
4975         struct btrfs_bio *bbio = bio->bi_private;
4976         int is_orig_bio = 0;
4977
4978         if (err) {
4979                 atomic_inc(&bbio->error);
4980                 if (err == -EIO || err == -EREMOTEIO) {
4981                         unsigned int stripe_index =
4982                                 btrfs_io_bio(bio)->stripe_index;
4983                         struct btrfs_device *dev;
4984
4985                         BUG_ON(stripe_index >= bbio->num_stripes);
4986                         dev = bbio->stripes[stripe_index].dev;
4987                         if (dev->bdev) {
4988                                 if (bio->bi_rw & WRITE)
4989                                         btrfs_dev_stat_inc(dev,
4990                                                 BTRFS_DEV_STAT_WRITE_ERRS);
4991                                 else
4992                                         btrfs_dev_stat_inc(dev,
4993                                                 BTRFS_DEV_STAT_READ_ERRS);
4994                                 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4995                                         btrfs_dev_stat_inc(dev,
4996                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
4997                                 btrfs_dev_stat_print_on_error(dev);
4998                         }
4999                 }
5000         }
5001
5002         if (bio == bbio->orig_bio)
5003                 is_orig_bio = 1;
5004
5005         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5006                 if (!is_orig_bio) {
5007                         bio_put(bio);
5008                         bio = bbio->orig_bio;
5009                 }
5010                 bio->bi_private = bbio->private;
5011                 bio->bi_end_io = bbio->end_io;
5012                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5013                 /* only send an error to the higher layers if it is
5014                  * beyond the tolerance of the btrfs bio
5015                  */
5016                 if (atomic_read(&bbio->error) > bbio->max_errors) {
5017                         err = -EIO;
5018                 } else {
5019                         /*
5020                          * this bio is actually up to date, we didn't
5021                          * go over the max number of errors
5022                          */
5023                         set_bit(BIO_UPTODATE, &bio->bi_flags);
5024                         err = 0;
5025                 }
5026                 kfree(bbio);
5027
5028                 bio_endio(bio, err);
5029         } else if (!is_orig_bio) {
5030                 bio_put(bio);
5031         }
5032 }
5033
5034 struct async_sched {
5035         struct bio *bio;
5036         int rw;
5037         struct btrfs_fs_info *info;
5038         struct btrfs_work work;
5039 };
5040
5041 /*
5042  * see run_scheduled_bios for a description of why bios are collected for
5043  * async submit.
5044  *
5045  * This will add one bio to the pending list for a device and make sure
5046  * the work struct is scheduled.
5047  */
5048 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5049                                         struct btrfs_device *device,
5050                                         int rw, struct bio *bio)
5051 {
5052         int should_queue = 1;
5053         struct btrfs_pending_bios *pending_bios;
5054
5055         if (device->missing || !device->bdev) {
5056                 bio_endio(bio, -EIO);
5057                 return;
5058         }
5059
5060         /* don't bother with additional async steps for reads, right now */
5061         if (!(rw & REQ_WRITE)) {
5062                 bio_get(bio);
5063                 btrfsic_submit_bio(rw, bio);
5064                 bio_put(bio);
5065                 return;
5066         }
5067
5068         /*
5069          * nr_async_bios allows us to reliably return congestion to the
5070          * higher layers.  Otherwise, the async bio makes it appear we have
5071          * made progress against dirty pages when we've really just put it
5072          * on a queue for later
5073          */
5074         atomic_inc(&root->fs_info->nr_async_bios);
5075         WARN_ON(bio->bi_next);
5076         bio->bi_next = NULL;
5077         bio->bi_rw |= rw;
5078
5079         spin_lock(&device->io_lock);
5080         if (bio->bi_rw & REQ_SYNC)
5081                 pending_bios = &device->pending_sync_bios;
5082         else
5083                 pending_bios = &device->pending_bios;
5084
5085         if (pending_bios->tail)
5086                 pending_bios->tail->bi_next = bio;
5087
5088         pending_bios->tail = bio;
5089         if (!pending_bios->head)
5090                 pending_bios->head = bio;
5091         if (device->running_pending)
5092                 should_queue = 0;
5093
5094         spin_unlock(&device->io_lock);
5095
5096         if (should_queue)
5097                 btrfs_queue_worker(&root->fs_info->submit_workers,
5098                                    &device->work);
5099 }
5100
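/*
 * Check whether @bio can be submitted to @bdev as-is: it must fit in the
 * queue's max sectors and, if the driver has a merge_bvec_fn, the last
 * bio_vec must still be acceptable at the new @sector.
 */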
5101 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5102                        sector_t sector)
5103 {
5104         struct bio_vec *prev;
5105         struct request_queue *q = bdev_get_queue(bdev);
5106         unsigned short max_sectors = queue_max_sectors(q);
5107         struct bvec_merge_data bvm = {
5108                 .bi_bdev = bdev,
5109                 .bi_sector = sector,
5110                 .bi_rw = bio->bi_rw,
5111         };
5112
5113         if (bio->bi_vcnt == 0) {
5114                 WARN_ON(1);
5115                 return 1;
5116         }
5117
5118         prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5119         if (bio_sectors(bio) > max_sectors)
5120                 return 0;
5121
5122         if (!q->merge_bvec_fn)
5123                 return 1;
5124
5125         bvm.bi_size = bio->bi_size - prev->bv_len;
5126         if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5127                 return 0;
5128         return 1;
5129 }
5130
5131 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5132                               struct bio *bio, u64 physical, int dev_nr,
5133                               int rw, int async)
5134 {
5135         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5136
5137         bio->bi_private = bbio;
5138         btrfs_io_bio(bio)->stripe_index = dev_nr;
5139         bio->bi_end_io = btrfs_end_bio;
5140         bio->bi_sector = physical >> 9;
5141 #ifdef DEBUG
5142         {
5143                 struct rcu_string *name;
5144
5145                 rcu_read_lock();
5146                 name = rcu_dereference(dev->name);
5147                 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5148                          "(%s id %llu), size=%u\n", rw,
5149                          (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5150                          name->str, dev->devid, bio->bi_size);
5151                 rcu_read_unlock();
5152         }
5153 #endif
5154         bio->bi_bdev = dev->bdev;
5155         if (async)
5156                 btrfs_schedule_bio(root, dev, rw, bio);
5157         else
5158                 btrfsic_submit_bio(rw, bio);
5159 }
5160
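/*
 * Split @first_bio into bios the target device will accept: keep adding
 * pages until bio_add_page() refuses one, submit that piece, and continue
 * at the next physical offset.
 */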
5161 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5162                               struct bio *first_bio, struct btrfs_device *dev,
5163                               int dev_nr, int rw, int async)
5164 {
5165         struct bio_vec *bvec = first_bio->bi_io_vec;
5166         struct bio *bio;
5167         int nr_vecs = bio_get_nr_vecs(dev->bdev);
5168         u64 physical = bbio->stripes[dev_nr].physical;
5169
5170 again:
5171         bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5172         if (!bio)
5173                 return -ENOMEM;
5174
5175         while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5176                 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5177                                  bvec->bv_offset) < bvec->bv_len) {
5178                         u64 len = bio->bi_size;
5179
5180                         atomic_inc(&bbio->stripes_pending);
5181                         submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5182                                           rw, async);
5183                         physical += len;
5184                         goto again;
5185                 }
5186                 bvec++;
5187         }
5188
5189         submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5190         return 0;
5191 }
5192
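/*
 * Account a stripe whose device is unusable: bump the error count and, if
 * this was the last outstanding stripe, complete the original bio with -EIO.
 */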
5193 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5194 {
5195         atomic_inc(&bbio->error);
5196         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5197                 bio->bi_private = bbio->private;
5198                 bio->bi_end_io = bbio->end_io;
5199                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5200                 bio->bi_sector = logical >> 9;
5201                 kfree(bbio);
5202                 bio_endio(bio, -EIO);
5203         }
5204 }
5205
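/*
 * Top-level bio submission: map the bio's logical range to stripes and
 * send a (possibly cloned) bio to each target device. RAID5/6 requests
 * are handed off to the raid56 code instead.
 */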
5206 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5207                   int mirror_num, int async_submit)
5208 {
5209         struct btrfs_device *dev;
5210         struct bio *first_bio = bio;
5211         u64 logical = (u64)bio->bi_sector << 9;
5212         u64 length = 0;
5213         u64 map_length;
5214         u64 *raid_map = NULL;
5215         int ret;
5216         int dev_nr = 0;
5217         int total_devs = 1;
5218         struct btrfs_bio *bbio = NULL;
5219
5220         length = bio->bi_size;
5221         map_length = length;
5222
5223         ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5224                               mirror_num, &raid_map);
5225         if (ret) /* -ENOMEM */
5226                 return ret;
5227
5228         total_devs = bbio->num_stripes;
5229         bbio->orig_bio = first_bio;
5230         bbio->private = first_bio->bi_private;
5231         bbio->end_io = first_bio->bi_end_io;
5232         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5233
5234         if (raid_map) {
5235                 /* In this case, map_length has been set to the length of
5236                    a single stripe, not the whole write */
5237                 if (rw & WRITE) {
5238                         return raid56_parity_write(root, bio, bbio,
5239                                                    raid_map, map_length);
5240                 } else {
5241                         return raid56_parity_recover(root, bio, bbio,
5242                                                      raid_map, map_length,
5243                                                      mirror_num);
5244                 }
5245         }
5246
5247         if (map_length < length) {
5248                 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5249                         (unsigned long long)logical,
5250                         (unsigned long long)length,
5251                         (unsigned long long)map_length);
5252                 BUG();
5253         }
5254
5255         while (dev_nr < total_devs) {
5256                 dev = bbio->stripes[dev_nr].dev;
5257                 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5258                         bbio_error(bbio, first_bio, logical);
5259                         dev_nr++;
5260                         continue;
5261                 }
5262
5263                 /*
5264                  * Check and see if we're ok with this bio based on its size
5265                  * and offset within the given device.
5266                  */
5267                 if (!bio_size_ok(dev->bdev, first_bio,
5268                                  bbio->stripes[dev_nr].physical >> 9)) {
5269                         ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5270                                                  dev_nr, rw, async_submit);
5271                         BUG_ON(ret);
5272                         dev_nr++;
5273                         continue;
5274                 }
5275
5276                 if (dev_nr < total_devs - 1) {
5277                         bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5278                         BUG_ON(!bio); /* -ENOMEM */
5279                 } else {
5280                         bio = first_bio;
5281                 }
5282
5283                 submit_stripe_bio(root, bbio, bio,
5284                                   bbio->stripes[dev_nr].physical, dev_nr, rw,
5285                                   async_submit);
5286                 dev_nr++;
5287         }
5288         return 0;
5289 }
5290
5291 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5292                                        u8 *uuid, u8 *fsid)
5293 {
5294         struct btrfs_device *device;
5295         struct btrfs_fs_devices *cur_devices;
5296
5297         cur_devices = fs_info->fs_devices;
5298         while (cur_devices) {
5299                 if (!fsid ||
5300                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5301                         device = __find_device(&cur_devices->devices,
5302                                                devid, uuid);
5303                         if (device)
5304                                 return device;
5305                 }
5306                 cur_devices = cur_devices->seed;
5307         }
5308         return NULL;
5309 }
5310
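/*
 * Create an in-memory stand-in for a device that the metadata references
 * but that is currently absent, so chunk mappings can still be built on
 * degraded mounts.
 */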
5311 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5312                                             u64 devid, u8 *dev_uuid)
5313 {
5314         struct btrfs_device *device;
5315         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5316
5317         device = kzalloc(sizeof(*device), GFP_NOFS);
5318         if (!device)
5319                 return NULL;
5320         list_add(&device->dev_list,
5321                  &fs_devices->devices);
5322         device->devid = devid;
5323         device->work.func = pending_bios_fn;
5324         device->fs_devices = fs_devices;
5325         device->missing = 1;
5326         fs_devices->num_devices++;
5327         fs_devices->missing_devices++;
5328         spin_lock_init(&device->io_lock);
5329         INIT_LIST_HEAD(&device->dev_alloc_list);
5330         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
5331         return device;
5332 }
5333
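/*
 * Turn an on-disk chunk item into a map_lookup and insert it into the
 * mapping tree, resolving each stripe's devid/uuid to a btrfs_device (or
 * to a missing-device placeholder on degraded mounts).
 */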
5334 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5335                           struct extent_buffer *leaf,
5336                           struct btrfs_chunk *chunk)
5337 {
5338         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5339         struct map_lookup *map;
5340         struct extent_map *em;
5341         u64 logical;
5342         u64 length;
5343         u64 devid;
5344         u8 uuid[BTRFS_UUID_SIZE];
5345         int num_stripes;
5346         int ret;
5347         int i;
5348
5349         logical = key->offset;
5350         length = btrfs_chunk_length(leaf, chunk);
5351
5352         read_lock(&map_tree->map_tree.lock);
5353         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5354         read_unlock(&map_tree->map_tree.lock);
5355
5356         /* already mapped? */
5357         if (em && em->start <= logical && em->start + em->len > logical) {
5358                 free_extent_map(em);
5359                 return 0;
5360         } else if (em) {
5361                 free_extent_map(em);
5362         }
5363
5364         em = alloc_extent_map();
5365         if (!em)
5366                 return -ENOMEM;
5367         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5368         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5369         if (!map) {
5370                 free_extent_map(em);
5371                 return -ENOMEM;
5372         }
5373
5374         em->bdev = (struct block_device *)map;
5375         em->start = logical;
5376         em->len = length;
5377         em->orig_start = 0;
5378         em->block_start = 0;
5379         em->block_len = em->len;
5380
5381         map->num_stripes = num_stripes;
5382         map->io_width = btrfs_chunk_io_width(leaf, chunk);
5383         map->io_align = btrfs_chunk_io_align(leaf, chunk);
5384         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5385         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5386         map->type = btrfs_chunk_type(leaf, chunk);
5387         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5388         for (i = 0; i < num_stripes; i++) {
5389                 map->stripes[i].physical =
5390                         btrfs_stripe_offset_nr(leaf, chunk, i);
5391                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5392                 read_extent_buffer(leaf, uuid, (unsigned long)
5393                                    btrfs_stripe_dev_uuid_nr(chunk, i),
5394                                    BTRFS_UUID_SIZE);
5395                 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5396                                                         uuid, NULL);
5397                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5398                         kfree(map);
5399                         free_extent_map(em);
5400                         return -EIO;
5401                 }
5402                 if (!map->stripes[i].dev) {
5403                         map->stripes[i].dev =
5404                                 add_missing_dev(root, devid, uuid);
5405                         if (!map->stripes[i].dev) {
5406                                 kfree(map);
5407                                 free_extent_map(em);
5408                                 return -EIO;
5409                         }
5410                 }
5411                 map->stripes[i].dev->in_fs_metadata = 1;
5412         }
5413
5414         write_lock(&map_tree->map_tree.lock);
5415         ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5416         write_unlock(&map_tree->map_tree.lock);
5417         BUG_ON(ret); /* Tree corruption */
5418         free_extent_map(em);
5419
5420         return 0;
5421 }
5422
5423 static void fill_device_from_item(struct extent_buffer *leaf,
5424                                  struct btrfs_dev_item *dev_item,
5425                                  struct btrfs_device *device)
5426 {
5427         unsigned long ptr;
5428
5429         device->devid = btrfs_device_id(leaf, dev_item);
5430         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5431         device->total_bytes = device->disk_total_bytes;
5432         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5433         device->type = btrfs_device_type(leaf, dev_item);
5434         device->io_align = btrfs_device_io_align(leaf, dev_item);
5435         device->io_width = btrfs_device_io_width(leaf, dev_item);
5436         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5437         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5438         device->is_tgtdev_for_dev_replace = 0;
5439
5440         ptr = (unsigned long)btrfs_device_uuid(dev_item);
5441         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5442 }
5443
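/*
 * Make sure the seed filesystem identified by @fsid is opened and chained
 * onto this filesystem's seed list so that its devices can be found while
 * the chunk tree is read.
 */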
5444 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5445 {
5446         struct btrfs_fs_devices *fs_devices;
5447         int ret;
5448
5449         BUG_ON(!mutex_is_locked(&uuid_mutex));
5450
5451         fs_devices = root->fs_info->fs_devices->seed;
5452         while (fs_devices) {
5453                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5454                         ret = 0;
5455                         goto out;
5456                 }
5457                 fs_devices = fs_devices->seed;
5458         }
5459
5460         fs_devices = find_fsid(fsid);
5461         if (!fs_devices) {
5462                 ret = -ENOENT;
5463                 goto out;
5464         }
5465
5466         fs_devices = clone_fs_devices(fs_devices);
5467         if (IS_ERR(fs_devices)) {
5468                 ret = PTR_ERR(fs_devices);
5469                 goto out;
5470         }
5471
5472         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5473                                    root->fs_info->bdev_holder);
5474         if (ret) {
5475                 free_fs_devices(fs_devices);
5476                 goto out;
5477         }
5478
5479         if (!fs_devices->seeding) {
5480                 __btrfs_close_devices(fs_devices);
5481                 free_fs_devices(fs_devices);
5482                 ret = -EINVAL;
5483                 goto out;
5484         }
5485
5486         fs_devices->seed = root->fs_info->fs_devices->seed;
5487         root->fs_info->fs_devices->seed = fs_devices;
5488 out:
5489         return ret;
5490 }
5491
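/*
 * Process one dev item from the chunk tree: find (or, on degraded mounts,
 * fabricate) the matching btrfs_device and fill it in from the on-disk
 * item, updating the free-space accounting for writeable devices.
 */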
5492 static int read_one_dev(struct btrfs_root *root,
5493                         struct extent_buffer *leaf,
5494                         struct btrfs_dev_item *dev_item)
5495 {
5496         struct btrfs_device *device;
5497         u64 devid;
5498         int ret;
5499         u8 fs_uuid[BTRFS_UUID_SIZE];
5500         u8 dev_uuid[BTRFS_UUID_SIZE];
5501
5502         devid = btrfs_device_id(leaf, dev_item);
5503         read_extent_buffer(leaf, dev_uuid,
5504                            (unsigned long)btrfs_device_uuid(dev_item),
5505                            BTRFS_UUID_SIZE);
5506         read_extent_buffer(leaf, fs_uuid,
5507                            (unsigned long)btrfs_device_fsid(dev_item),
5508                            BTRFS_UUID_SIZE);
5509
5510         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5511                 ret = open_seed_devices(root, fs_uuid);
5512                 if (ret && !btrfs_test_opt(root, DEGRADED))
5513                         return ret;
5514         }
5515
5516         device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5517         if (!device || !device->bdev) {
5518                 if (!btrfs_test_opt(root, DEGRADED))
5519                         return -EIO;
5520
5521                 if (!device) {
5522                         btrfs_warn(root->fs_info, "devid %llu missing",
5523                                 (unsigned long long)devid);
5524                         device = add_missing_dev(root, devid, dev_uuid);
5525                         if (!device)
5526                                 return -ENOMEM;
5527                 } else if (!device->missing) {
5528                         /*
5529                          * this happens when a device that was properly set up
5530                          * in the device info lists suddenly goes bad.
5531                          * device->bdev is NULL, and so we have to set
5532                          * device->missing to one here
5533                          */
5534                         root->fs_info->fs_devices->missing_devices++;
5535                         device->missing = 1;
5536                 }
5537         }
5538
5539         if (device->fs_devices != root->fs_info->fs_devices) {
5540                 BUG_ON(device->writeable);
5541                 if (device->generation !=
5542                     btrfs_device_generation(leaf, dev_item))
5543                         return -EINVAL;
5544         }
5545
5546         fill_device_from_item(leaf, dev_item, device);
5547         device->in_fs_metadata = 1;
5548         if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5549                 device->fs_devices->total_rw_bytes += device->total_bytes;
5550                 spin_lock(&root->fs_info->free_chunk_lock);
5551                 root->fs_info->free_chunk_space += device->total_bytes -
5552                         device->bytes_used;
5553                 spin_unlock(&root->fs_info->free_chunk_lock);
5554         }
5555         ret = 0;
5556         return ret;
5557 }
5558
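/*
 * Read the bootstrap chunks from the superblock's sys_chunk_array so that
 * the chunk tree blocks themselves can be mapped and read.
 */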
5559 int btrfs_read_sys_array(struct btrfs_root *root)
5560 {
5561         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5562         struct extent_buffer *sb;
5563         struct btrfs_disk_key *disk_key;
5564         struct btrfs_chunk *chunk;
5565         u8 *ptr;
5566         unsigned long sb_ptr;
5567         int ret = 0;
5568         u32 num_stripes;
5569         u32 array_size;
5570         u32 len = 0;
5571         u32 cur;
5572         struct btrfs_key key;
5573
5574         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5575                                           BTRFS_SUPER_INFO_SIZE);
5576         if (!sb)
5577                 return -ENOMEM;
5578         btrfs_set_buffer_uptodate(sb);
5579         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5580         /*
5581          * The sb extent buffer is artificial and just used to read the system array.
5582          * The btrfs_set_buffer_uptodate() call does not properly mark all its
5583          * pages up-to-date when the page is larger: the extent does not cover the
5584          * whole page and consequently check_page_uptodate does not find all
5585          * the page's extents up-to-date (the hole beyond sb),
5586          * write_extent_buffer then triggers a WARN_ON.
5587          *
5588          * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
5589          * but sb spans only this function. Add an explicit SetPageUptodate call
5590          * to silence the warning eg. on PowerPC 64.
5591          */
5592         if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5593                 SetPageUptodate(sb->pages[0]);
5594
5595         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5596         array_size = btrfs_super_sys_array_size(super_copy);
5597
5598         ptr = super_copy->sys_chunk_array;
5599         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5600         cur = 0;
5601
5602         while (cur < array_size) {
5603                 disk_key = (struct btrfs_disk_key *)ptr;
5604                 btrfs_disk_key_to_cpu(&key, disk_key);
5605
5606                 len = sizeof(*disk_key); ptr += len;
5607                 sb_ptr += len;
5608                 cur += len;
5609
5610                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5611                         chunk = (struct btrfs_chunk *)sb_ptr;
5612                         ret = read_one_chunk(root, &key, sb, chunk);
5613                         if (ret)
5614                                 break;
5615                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5616                         len = btrfs_chunk_item_size(num_stripes);
5617                 } else {
5618                         ret = -EIO;
5619                         break;
5620                 }
5621                 ptr += len;
5622                 sb_ptr += len;
5623                 cur += len;
5624         }
5625         free_extent_buffer(sb);
5626         return ret;
5627 }
5628
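/*
 * Build the in-memory device and chunk state from the chunk tree.
 * This is done in two passes: first every DEV_ITEM is read so that all
 * devices are known, then every CHUNK_ITEM is read to create the
 * logical->physical mappings that reference those devices.
 */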
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        int ret;
        int slot;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        mutex_lock(&uuid_mutex);
        lock_chunks(root);

        /*
         * First we search for all of the device items, and then we
         * read in all of the chunk items.  This way we can create
         * chunk mappings that reference all of the devices that are
         * found.
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                        if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
                                break;
                        if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                                struct btrfs_dev_item *dev_item;
                                dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                                ret = read_one_dev(root, leaf, dev_item);
                                if (ret)
                                        goto error;
                        }
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(root, &found_key, leaf, chunk);
                        if (ret)
                                goto error;
                }
                path->slots[0]++;
        }
        if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                key.objectid = 0;
                btrfs_release_path(path);
                goto again;
        }
        ret = 0;
error:
        unlock_chunks(root);
        mutex_unlock(&uuid_mutex);

        btrfs_free_path(path);
        return ret;
}

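/*
 * Late mount-time initialization: once the dev tree root has been read,
 * point every known device at it.
 */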
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list)
                device->dev_root = fs_info->dev_root;
        mutex_unlock(&fs_devices->device_list_mutex);
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
        int i;

        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                btrfs_dev_stat_reset(dev, i);
}

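/*
 * Load the persistent per-device error counters from the dev tree.  A
 * device without a dev_stats item starts with all counters at zero; an
 * item shorter than expected (e.g. written by an older kernel with
 * fewer counters) leaves the missing counters reset.  The on-disk item
 * is simply an array of __le64 values, hence the comparison of
 * item_size against (1 + i) * sizeof(__le64) below.
 */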
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *dev_root = fs_info->dev_root;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct extent_buffer *eb;
        int slot;
        int ret = 0;
        struct btrfs_device *device;
        struct btrfs_path *path = NULL;
        int i;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                int item_size;
                struct btrfs_dev_stats_item *ptr;

                key.objectid = 0;
                key.type = BTRFS_DEV_STATS_KEY;
                key.offset = device->devid;
                ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
                if (ret) {
                        __btrfs_reset_dev_stats(device);
                        device->dev_stats_valid = 1;
                        btrfs_release_path(path);
                        continue;
                }
                slot = path->slots[0];
                eb = path->nodes[0];
                btrfs_item_key_to_cpu(eb, &found_key, slot);
                item_size = btrfs_item_size_nr(eb, slot);

                ptr = btrfs_item_ptr(eb, slot,
                                     struct btrfs_dev_stats_item);

                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
                        if (item_size >= (1 + i) * sizeof(__le64))
                                btrfs_dev_stat_set(device, i,
                                        btrfs_dev_stats_value(eb, ptr, i));
                        else
                                btrfs_dev_stat_reset(device, i);
                }

                device->dev_stats_valid = 1;
                btrfs_dev_stat_print_on_load(device);
                btrfs_release_path(path);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

out:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}

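/*
 * Write the in-memory counters of one device back into its dev_stats
 * item.  If btrfs_search_slot() returns 1 no item exists yet and one
 * is inserted; an existing item that is too small (fewer counter slots
 * than the running kernel knows about) is deleted and reinserted at
 * full size.
 */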
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
                                struct btrfs_root *dev_root,
                                struct btrfs_device *device)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *eb;
        struct btrfs_dev_stats_item *ptr;
        int ret;
        int i;

        key.objectid = 0;
        key.type = BTRFS_DEV_STATS_KEY;
        key.offset = device->devid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
        if (ret < 0) {
                printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
                              ret, rcu_str_deref(device->name));
                goto out;
        }

        if (ret == 0 &&
            btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
                /* need to delete old one and insert a new one */
                ret = btrfs_del_item(trans, dev_root, path);
                if (ret != 0) {
                        printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
                                      rcu_str_deref(device->name), ret);
                        goto out;
                }
                ret = 1;
        }

        if (ret == 1) {
                /* need to insert a new item */
                btrfs_release_path(path);
                ret = btrfs_insert_empty_item(trans, dev_root, path,
                                              &key, sizeof(*ptr));
                if (ret < 0) {
                        printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
                                      rcu_str_deref(device->name), ret);
                        goto out;
                }
        }

        eb = path->nodes[0];
        ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                btrfs_set_dev_stats_value(eb, ptr, i,
                                          btrfs_dev_stat_read(device, i));
        btrfs_mark_buffer_dirty(eb);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * Called from commit_transaction.  Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *dev_root = fs_info->dev_root;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;
        int ret = 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (!device->dev_stats_valid || !device->dev_stats_dirty)
                        continue;

                ret = update_dev_stat_item(trans, dev_root, device);
                if (!ret)
                        device->dev_stats_dirty = 0;
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        return ret;
}

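/*
 * Bump one error counter and emit the ratelimited per-device summary.
 * Typically called from the I/O error paths when a read, write or
 * flush fails or corruption is detected.
 */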
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
        btrfs_dev_stat_inc(dev, index);
        btrfs_dev_stat_print_on_error(dev);
}

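/*
 * Ratelimited dump of all error counters.  Silent until the counters
 * have been initialized from the dev tree (dev_stats_valid).
 */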
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
        if (!dev->dev_stats_valid)
                return;
        printk_ratelimited_in_rcu(KERN_ERR
                           "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
                           rcu_str_deref(dev->name),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
                           btrfs_dev_stat_read(dev,
                                               BTRFS_DEV_STAT_CORRUPTION_ERRS),
                           btrfs_dev_stat_read(dev,
                                               BTRFS_DEV_STAT_GENERATION_ERRS));
}

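/*
 * One-shot dump of the error counters when they are loaded at mount
 * time.  Stays silent if all counters are zero, so healthy devices do
 * not clutter the log.
 */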
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
        int i;

        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                if (btrfs_dev_stat_read(dev, i) != 0)
                        break;
        if (i == BTRFS_DEV_STAT_VALUES_MAX)
                return; /* all values == 0, suppress message */

        printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
               rcu_str_deref(dev->name),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

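/*
 * Fill a btrfs_ioctl_get_dev_stats with the counters of one device,
 * optionally resetting them.  stats->nr_items lets older userspace
 * with fewer counter slots keep working; it is clamped to
 * BTRFS_DEV_STAT_VALUES_MAX on return.
 *
 * Illustrative userspace sketch only (the actual ioctl plumbing lives
 * in ioctl.c; fs_fd and reset are assumed locals):
 *
 *        struct btrfs_ioctl_get_dev_stats args;
 *
 *        memset(&args, 0, sizeof(args));
 *        args.devid = devid;
 *        args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *        args.flags = reset ? BTRFS_DEV_STATS_RESET : 0;
 *        ret = ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &args);
 */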
int btrfs_get_dev_stats(struct btrfs_root *root,
                        struct btrfs_ioctl_get_dev_stats *stats)
{
        struct btrfs_device *dev;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        int i;

        mutex_lock(&fs_devices->device_list_mutex);
        dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
        mutex_unlock(&fs_devices->device_list_mutex);

        if (!dev) {
                printk(KERN_WARNING
                       "btrfs: get dev_stats failed, device not found\n");
                return -ENODEV;
        } else if (!dev->dev_stats_valid) {
                printk(KERN_WARNING
                       "btrfs: get dev_stats failed, not yet valid\n");
                return -ENODEV;
        } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
                        if (stats->nr_items > i)
                                stats->values[i] =
                                        btrfs_dev_stat_read_and_reset(dev, i);
                        else
                                btrfs_dev_stat_reset(dev, i);
                }
        } else {
                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                        if (stats->nr_items > i)
                                stats->values[i] = btrfs_dev_stat_read(dev, i);
        }
        if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
                stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
        return 0;
}

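/*
 * Zero the magic in the device's superblock so the device is no longer
 * detected as btrfs, e.g. when it has been removed from the filesystem
 * or replaced.  Note that btrfs_read_dev_super() returns a single
 * buffer_head, so only that superblock copy is wiped here.
 */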
int btrfs_scratch_superblock(struct btrfs_device *device)
{
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;

        bh = btrfs_read_dev_super(device->bdev);
        if (!bh)
                return -EINVAL;
        disk_super = (struct btrfs_super_block *)bh->b_data;

        memset(&disk_super->magic, 0, sizeof(disk_super->magic));
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
        brelse(bh);

        return 0;
}