1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/slab.h>
29 #include <linux/migrate.h>
30 #include <linux/ratelimit.h>
31 #include <linux/uuid.h>
32 #include <linux/semaphore.h>
33 #include <asm/unaligned.h>
34 #include "ctree.h"
35 #include "disk-io.h"
36 #include "hash.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "locking.h"
42 #include "tree-log.h"
43 #include "free-space-cache.h"
44 #include "free-space-tree.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48 #include "dev-replace.h"
49 #include "raid56.h"
50 #include "sysfs.h"
51 #include "qgroup.h"
52 #include "compression.h"
53
54 #ifdef CONFIG_X86
55 #include <asm/cpufeature.h>
56 #endif
57
58 #define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
59                                  BTRFS_HEADER_FLAG_RELOC |\
60                                  BTRFS_SUPER_FLAG_ERROR |\
61                                  BTRFS_SUPER_FLAG_SEEDING |\
62                                  BTRFS_SUPER_FLAG_METADUMP)
63
64 static const struct extent_io_ops btree_extent_io_ops;
65 static void end_workqueue_fn(struct btrfs_work *work);
66 static void free_fs_root(struct btrfs_root *root);
67 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
68                                     int read_only);
69 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
70 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
71                                       struct btrfs_root *root);
72 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
73 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
74                                         struct extent_io_tree *dirty_pages,
75                                         int mark);
76 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
77                                        struct extent_io_tree *pinned_extents);
78 static int btrfs_cleanup_transaction(struct btrfs_root *root);
79 static void btrfs_error_commit_super(struct btrfs_root *root);
80
81 /*
82  * btrfs_end_io_wq structs are used to do processing in task context when an IO
83  * is complete.  This is used during reads to verify checksums, and it is used
84  * by writes to insert metadata for new file extents after IO is complete.
85  */
86 struct btrfs_end_io_wq {
87         struct bio *bio;
88         bio_end_io_t *end_io;
89         void *private;
90         struct btrfs_fs_info *info;
91         int error;
92         enum btrfs_wq_endio_type metadata;
93         struct list_head list;
94         struct btrfs_work work;
95 };
96
97 static struct kmem_cache *btrfs_end_io_wq_cache;
98
99 int __init btrfs_end_io_wq_init(void)
100 {
101         btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
102                                         sizeof(struct btrfs_end_io_wq),
103                                         0,
104                                         SLAB_MEM_SPREAD,
105                                         NULL);
106         if (!btrfs_end_io_wq_cache)
107                 return -ENOMEM;
108         return 0;
109 }
110
111 void btrfs_end_io_wq_exit(void)
112 {
113         kmem_cache_destroy(btrfs_end_io_wq_cache);
114 }
115
116 /*
117  * async submit bios are used to offload expensive checksumming
118  * onto the worker threads.  They checksum file and metadata bios
119  * just before they are sent down the IO stack.
120  */
121 struct async_submit_bio {
122         struct inode *inode;
123         struct bio *bio;
124         struct list_head list;
125         extent_submit_bio_hook_t *submit_bio_start;
126         extent_submit_bio_hook_t *submit_bio_done;
127         int rw;
128         int mirror_num;
129         unsigned long bio_flags;
130         /*
131          * bio_offset is optional, can be used if the pages in the bio
132          * can't tell us where in the file the bio should go
133          */
134         u64 bio_offset;
135         struct btrfs_work work;
136         int error;
137 };
138
139 /*
140  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
141  * eb, the lockdep key is determined by the btrfs_root it belongs to and
142  * the level the eb occupies in the tree.
143  *
144  * Different roots are used for different purposes and may nest inside each
145  * other and they require separate keysets.  As lockdep keys should be
146  * static, assign keysets according to the purpose of the root as indicated
147  * by btrfs_root->objectid.  This ensures that all special purpose roots
148  * have separate keysets.
149  *
150  * Lock-nesting across peer nodes is always done with the immediate parent
151  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
152  * subclass to avoid triggering lockdep warning in such cases.
153  *
154  * The key is set by the readpage_end_io_hook after the buffer has passed
155  * csum validation but before the pages are unlocked.  It is also set by
156  * btrfs_init_new_buffer on freshly allocated blocks.
157  *
158  * We also add a check to make sure the highest level of the tree is the
159  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
160  * needs update as well.
161  */
162 #ifdef CONFIG_DEBUG_LOCK_ALLOC
163 # if BTRFS_MAX_LEVEL != 8
164 #  error
165 # endif
166
167 static struct btrfs_lockdep_keyset {
168         u64                     id;             /* root objectid */
169         const char              *name_stem;     /* lock name stem */
170         char                    names[BTRFS_MAX_LEVEL + 1][20];
171         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
172 } btrfs_lockdep_keysets[] = {
173         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
174         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
175         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
176         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
177         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
178         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
179         { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
180         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
181         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
182         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
183         { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
184         { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
185         { .id = 0,                              .name_stem = "tree"     },
186 };
187
188 void __init btrfs_init_lockdep(void)
189 {
190         int i, j;
191
192         /* initialize lockdep class names */
193         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
194                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
195
196                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
197                         snprintf(ks->names[j], sizeof(ks->names[j]),
198                                  "btrfs-%s-%02d", ks->name_stem, j);
199         }
200 }
201
202 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
203                                     int level)
204 {
205         struct btrfs_lockdep_keyset *ks;
206
207         BUG_ON(level >= ARRAY_SIZE(ks->keys));
208
209         /* find the matching keyset, id 0 is the default entry */
210         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
211                 if (ks->id == objectid)
212                         break;
213
214         lockdep_set_class_and_name(&eb->lock,
215                                    &ks->keys[level], ks->names[level]);
216 }
217
218 #endif
219
220 /*
221  * extents on the btree inode are pretty simple, there's one extent
222  * that covers the entire device
223  */
224 static struct extent_map *btree_get_extent(struct inode *inode,
225                 struct page *page, size_t pg_offset, u64 start, u64 len,
226                 int create)
227 {
228         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
229         struct extent_map *em;
230         int ret;
231
232         read_lock(&em_tree->lock);
233         em = lookup_extent_mapping(em_tree, start, len);
234         if (em) {
235                 em->bdev =
236                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
237                 read_unlock(&em_tree->lock);
238                 goto out;
239         }
240         read_unlock(&em_tree->lock);
241
242         em = alloc_extent_map();
243         if (!em) {
244                 em = ERR_PTR(-ENOMEM);
245                 goto out;
246         }
247         em->start = 0;
248         em->len = (u64)-1;
249         em->block_len = (u64)-1;
250         em->block_start = 0;
251         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
252
253         write_lock(&em_tree->lock);
254         ret = add_extent_mapping(em_tree, em, 0);
255         if (ret == -EEXIST) {
256                 free_extent_map(em);
257                 em = lookup_extent_mapping(em_tree, start, len);
258                 if (!em)
259                         em = ERR_PTR(-EIO);
260         } else if (ret) {
261                 free_extent_map(em);
262                 em = ERR_PTR(ret);
263         }
264         write_unlock(&em_tree->lock);
265
266 out:
267         return em;
268 }
269
270 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
271 {
272         return btrfs_crc32c(seed, data, len);
273 }
274
275 void btrfs_csum_final(u32 crc, char *result)
276 {
277         put_unaligned_le32(~crc, result);
278 }
279
280 /*
281  * compute the csum for a btree block, and either verify it or write it
282  * into the csum field of the block.
283  */
284 static int csum_tree_block(struct btrfs_fs_info *fs_info,
285                            struct extent_buffer *buf,
286                            int verify)
287 {
288         u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
289         char *result = NULL;
290         unsigned long len;
291         unsigned long cur_len;
292         unsigned long offset = BTRFS_CSUM_SIZE;
293         char *kaddr;
294         unsigned long map_start;
295         unsigned long map_len;
296         int err;
297         u32 crc = ~(u32)0;
298         unsigned long inline_result;
299
300         len = buf->len - offset;
301         while (len > 0) {
302                 err = map_private_extent_buffer(buf, offset, 32,
303                                         &kaddr, &map_start, &map_len);
304                 if (err)
305                         return err;
306                 cur_len = min(len, map_len - (offset - map_start));
307                 crc = btrfs_csum_data(kaddr + offset - map_start,
308                                       crc, cur_len);
309                 len -= cur_len;
310                 offset += cur_len;
311         }
312         if (csum_size > sizeof(inline_result)) {
313                 result = kzalloc(csum_size, GFP_NOFS);
314                 if (!result)
315                         return -ENOMEM;
316         } else {
317                 result = (char *)&inline_result;
318         }
319
320         btrfs_csum_final(crc, result);
321
322         if (verify) {
323                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
324                         u32 val;
325                         u32 found = 0;
326                         memcpy(&found, result, csum_size);
327
328                         read_extent_buffer(buf, &val, 0, csum_size);
329                         btrfs_warn_rl(fs_info,
330                                 "%s checksum verify failed on %llu wanted %X found %X "
331                                 "level %d",
332                                 fs_info->sb->s_id, buf->start,
333                                 val, found, btrfs_header_level(buf));
334                         if (result != (char *)&inline_result)
335                                 kfree(result);
336                         return -EUCLEAN;
337                 }
338         } else {
339                 write_extent_buffer(buf, result, 0, csum_size);
340         }
341         if (result != (char *)&inline_result)
342                 kfree(result);
343         return 0;
344 }
345
346 /*
347  * we can't consider a given block up to date unless the transid of the
348  * block matches the transid in the parent node's pointer.  This is how we
349  * detect blocks that either didn't get written at all or got written
350  * in the wrong place.
351  */
352 static int verify_parent_transid(struct extent_io_tree *io_tree,
353                                  struct extent_buffer *eb, u64 parent_transid,
354                                  int atomic)
355 {
356         struct extent_state *cached_state = NULL;
357         int ret;
358         bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
359
360         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
361                 return 0;
362
363         if (atomic)
364                 return -EAGAIN;
365
366         if (need_lock) {
367                 btrfs_tree_read_lock(eb);
368                 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
369         }
370
371         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
372                          &cached_state);
373         if (extent_buffer_uptodate(eb) &&
374             btrfs_header_generation(eb) == parent_transid) {
375                 ret = 0;
376                 goto out;
377         }
378         btrfs_err_rl(eb->fs_info,
379                 "parent transid verify failed on %llu wanted %llu found %llu",
380                         eb->start,
381                         parent_transid, btrfs_header_generation(eb));
382         ret = 1;
383
384         /*
385          * Things reading via commit roots that don't have normal protection,
386          * like send, can have a really old block in cache that may point at a
387          * block that has been freed and re-allocated.  So don't clear uptodate
388          * if we find an eb that is under IO (dirty/writeback) because we could
389          * end up reading in the stale data and then writing it back out and
390          * making everybody very sad.
391          */
392         if (!extent_buffer_under_io(eb))
393                 clear_extent_buffer_uptodate(eb);
394 out:
395         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
396                              &cached_state, GFP_NOFS);
397         if (need_lock)
398                 btrfs_tree_read_unlock_blocking(eb);
399         return ret;
400 }
401
402 /*
403  * Return 0 if the superblock checksum type matches the checksum value of that
404  * algorithm. Pass the raw disk superblock data.
405  */
406 static int btrfs_check_super_csum(char *raw_disk_sb)
407 {
408         struct btrfs_super_block *disk_sb =
409                 (struct btrfs_super_block *)raw_disk_sb;
410         u16 csum_type = btrfs_super_csum_type(disk_sb);
411         int ret = 0;
412
413         if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
414                 u32 crc = ~(u32)0;
415                 const int csum_size = sizeof(crc);
416                 char result[csum_size];
417
418                 /*
419                  * The super_block structure does not span the whole
420                  * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
421                  * is filled with zeros and is included in the checksum.
422                  */
423                 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
424                                 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
425                 btrfs_csum_final(crc, result);
426
427                 if (memcmp(raw_disk_sb, result, csum_size))
428                         ret = 1;
429         }
430
431         if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
432                 printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
433                                 csum_type);
434                 ret = 1;
435         }
436
437         return ret;
438 }
439
440 /*
441  * helper to read a given tree block, doing retries as required when
442  * the checksums don't match and we have alternate mirrors to try.
443  */
444 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
445                                           struct extent_buffer *eb,
446                                           u64 start, u64 parent_transid)
447 {
448         struct extent_io_tree *io_tree;
449         int failed = 0;
450         int ret;
451         int num_copies = 0;
452         int mirror_num = 0;
453         int failed_mirror = 0;
454
455         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
456         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
457         while (1) {
458                 ret = read_extent_buffer_pages(io_tree, eb, start,
459                                                WAIT_COMPLETE,
460                                                btree_get_extent, mirror_num);
461                 if (!ret) {
462                         if (!verify_parent_transid(io_tree, eb,
463                                                    parent_transid, 0))
464                                 break;
465                         else
466                                 ret = -EIO;
467                 }
468
469                 /*
470                  * This buffer's crc is fine, but its contents are corrupted, so
471                  * there is no reason to read the other copies, they won't be
472                  * any less wrong.
473                  */
474                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
475                         break;
476
477                 num_copies = btrfs_num_copies(root->fs_info,
478                                               eb->start, eb->len);
479                 if (num_copies == 1)
480                         break;
481
482                 if (!failed_mirror) {
483                         failed = 1;
484                         failed_mirror = eb->read_mirror;
485                 }
486
487                 mirror_num++;
488                 if (mirror_num == failed_mirror)
489                         mirror_num++;
490
491                 if (mirror_num > num_copies)
492                         break;
493         }
494
495         if (failed && !ret && failed_mirror)
496                 repair_eb_io_failure(root, eb, failed_mirror);
497
498         return ret;
499 }
500
501 /*
502  * checksum a dirty tree block before IO.  This has extra checks to make sure
503  * we only fill in the checksum field in the first page of a multi-page block
504  */
505
506 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
507 {
508         u64 start = page_offset(page);
509         u64 found_start;
510         struct extent_buffer *eb;
511
512         eb = (struct extent_buffer *)page->private;
513         if (page != eb->pages[0])
514                 return 0;
515
516         found_start = btrfs_header_bytenr(eb);
517         /*
518          * Please do not consolidate these warnings into a single if.
519          * It is useful to know what went wrong.
520          */
521         if (WARN_ON(found_start != start))
522                 return -EUCLEAN;
523         if (WARN_ON(!PageUptodate(page)))
524                 return -EUCLEAN;
525
526         ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
527                         btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
528
529         return csum_tree_block(fs_info, eb, 0);
530 }
531
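/*
 * Check that the fsid stored in @eb's header matches one of this
 * filesystem's fs_devices, walking the seed device list as well so blocks
 * inherited from a seed filesystem are still accepted.  Returns 0 on a
 * match, 1 otherwise.
 */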
532 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
533                                  struct extent_buffer *eb)
534 {
535         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
536         u8 fsid[BTRFS_UUID_SIZE];
537         int ret = 1;
538
539         read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
540         while (fs_devices) {
541                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
542                         ret = 0;
543                         break;
544                 }
545                 fs_devices = fs_devices->seed;
546         }
547         return ret;
548 }
549
550 #define CORRUPT(reason, eb, root, slot)                         \
551         btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu, "     \
552                    "root=%llu, slot=%d", reason,                        \
553                btrfs_header_bytenr(eb), root->objectid, slot)
554
555 static noinline int check_leaf(struct btrfs_root *root,
556                                struct extent_buffer *leaf)
557 {
558         struct btrfs_key key;
559         struct btrfs_key leaf_key;
560         u32 nritems = btrfs_header_nritems(leaf);
561         int slot;
562
563         if (nritems == 0)
564                 return 0;
565
566         /* Check the 0 item */
567         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
568             BTRFS_LEAF_DATA_SIZE(root)) {
569                 CORRUPT("invalid item offset size pair", leaf, root, 0);
570                 return -EIO;
571         }
572
573         /*
574          * Check to make sure each item's keys are in the correct order and their
575          * offsets make sense.  We only have to loop through nritems-1 because
576          * we check the current slot against the next slot, which verifies the
577          * next slot's offset+size makes sense and that the current's slot
578          * offset is correct.
579          */
580         for (slot = 0; slot < nritems - 1; slot++) {
581                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
582                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
583
584                 /* Make sure the keys are in the right order */
585                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
586                         CORRUPT("bad key order", leaf, root, slot);
587                         return -EIO;
588                 }
589
590                 /*
591                  * Make sure the offset and ends are right, remember that the
592                  * item data starts at the end of the leaf and grows towards the
593                  * front.
594                  */
595                 if (btrfs_item_offset_nr(leaf, slot) !=
596                         btrfs_item_end_nr(leaf, slot + 1)) {
597                         CORRUPT("slot offset bad", leaf, root, slot);
598                         return -EIO;
599                 }
600
601                 /*
602                  * Check to make sure that we don't point outside of the leaf,
603                  * just in case all the items are consistent to each other, but
604                  * all point outside of the leaf.
605                  */
606                 if (btrfs_item_end_nr(leaf, slot) >
607                     BTRFS_LEAF_DATA_SIZE(root)) {
608                         CORRUPT("slot end outside of leaf", leaf, root, slot);
609                         return -EIO;
610                 }
611         }
612
613         return 0;
614 }
615
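/*
 * Basic sanity check for an interior node: the number of key/pointer slots
 * must be non-zero and must not exceed what fits in one block.  Returns 0
 * if the node looks sane, -EIO otherwise.
 */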
616 static int check_node(struct btrfs_root *root, struct extent_buffer *node)
617 {
618         unsigned long nr = btrfs_header_nritems(node);
619
620         if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
621                 btrfs_crit(root->fs_info,
622                            "corrupt node: block %llu root %llu nritems %lu",
623                            node->start, root->objectid, nr);
624                 return -EIO;
625         }
626         return 0;
627 }
628
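/*
 * Read completion hook for btree pages.  It is called for every page, but
 * once the last page of the buffer finishes IO it verifies the header
 * bytenr, fsid and level, checks the block checksum, runs the leaf/node
 * sanity checks and only then marks the extent buffer uptodate.
 */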
629 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
630                                       u64 phy_offset, struct page *page,
631                                       u64 start, u64 end, int mirror)
632 {
633         u64 found_start;
634         int found_level;
635         struct extent_buffer *eb;
636         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
637         struct btrfs_fs_info *fs_info = root->fs_info;
638         int ret = 0;
639         int reads_done;
640
641         if (!page->private)
642                 goto out;
643
644         eb = (struct extent_buffer *)page->private;
645
646         /* the pending IO might have been the only thing that kept this buffer
647          * in memory.  Make sure we have a ref for all these other checks
648          */
649         extent_buffer_get(eb);
650
651         reads_done = atomic_dec_and_test(&eb->io_pages);
652         if (!reads_done)
653                 goto err;
654
655         eb->read_mirror = mirror;
656         if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
657                 ret = -EIO;
658                 goto err;
659         }
660
661         found_start = btrfs_header_bytenr(eb);
662         if (found_start != eb->start) {
663                 btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
664                              found_start, eb->start);
665                 ret = -EIO;
666                 goto err;
667         }
668         if (check_tree_block_fsid(fs_info, eb)) {
669                 btrfs_err_rl(fs_info, "bad fsid on block %llu",
670                              eb->start);
671                 ret = -EIO;
672                 goto err;
673         }
674         found_level = btrfs_header_level(eb);
675         if (found_level >= BTRFS_MAX_LEVEL) {
676                 btrfs_err(fs_info, "bad tree block level %d",
677                           (int)btrfs_header_level(eb));
678                 ret = -EIO;
679                 goto err;
680         }
681
682         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
683                                        eb, found_level);
684
685         ret = csum_tree_block(fs_info, eb, 1);
686         if (ret)
687                 goto err;
688
689         /*
690          * If this is a leaf block and it is corrupt, set the corrupt bit so
691          * that we don't try and read the other copies of this block, just
692          * return -EIO.
693          */
694         if (found_level == 0 && check_leaf(root, eb)) {
695                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
696                 ret = -EIO;
697         }
698
699         if (found_level > 0 && check_node(root, eb))
700                 ret = -EIO;
701
702         if (!ret)
703                 set_extent_buffer_uptodate(eb);
704 err:
705         if (reads_done &&
706             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
707                 btree_readahead_hook(fs_info, eb, eb->start, ret);
708
709         if (ret) {
710                 /*
711                  * our io error hook is going to dec the io pages
712                  * again, we have to make sure it has something
713                  * to decrement
714                  */
715                 atomic_inc(&eb->io_pages);
716                 clear_extent_buffer_uptodate(eb);
717         }
718         free_extent_buffer(eb);
719 out:
720         return ret;
721 }
722
723 static int btree_io_failed_hook(struct page *page, int failed_mirror)
724 {
725         struct extent_buffer *eb;
726
727         eb = (struct extent_buffer *)page->private;
728         set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
729         eb->read_mirror = failed_mirror;
730         atomic_dec(&eb->io_pages);
731         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
732                 btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
733         return -EIO;    /* we fixed nothing */
734 }
735
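/*
 * bi_end_io for bios submitted through btrfs_bio_wq_end_io(): pick the
 * workqueue matching the IO direction and the endio type recorded in the
 * btrfs_end_io_wq, and defer the real completion work to it.
 */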
736 static void end_workqueue_bio(struct bio *bio)
737 {
738         struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
739         struct btrfs_fs_info *fs_info;
740         struct btrfs_workqueue *wq;
741         btrfs_work_func_t func;
742
743         fs_info = end_io_wq->info;
744         end_io_wq->error = bio->bi_error;
745
746         if (bio->bi_rw & REQ_WRITE) {
747                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
748                         wq = fs_info->endio_meta_write_workers;
749                         func = btrfs_endio_meta_write_helper;
750                 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
751                         wq = fs_info->endio_freespace_worker;
752                         func = btrfs_freespace_write_helper;
753                 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
754                         wq = fs_info->endio_raid56_workers;
755                         func = btrfs_endio_raid56_helper;
756                 } else {
757                         wq = fs_info->endio_write_workers;
758                         func = btrfs_endio_write_helper;
759                 }
760         } else {
761                 if (unlikely(end_io_wq->metadata ==
762                              BTRFS_WQ_ENDIO_DIO_REPAIR)) {
763                         wq = fs_info->endio_repair_workers;
764                         func = btrfs_endio_repair_helper;
765                 } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
766                         wq = fs_info->endio_raid56_workers;
767                         func = btrfs_endio_raid56_helper;
768                 } else if (end_io_wq->metadata) {
769                         wq = fs_info->endio_meta_workers;
770                         func = btrfs_endio_meta_helper;
771                 } else {
772                         wq = fs_info->endio_workers;
773                         func = btrfs_endio_helper;
774                 }
775         }
776
777         btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
778         btrfs_queue_work(wq, &end_io_wq->work);
779 }
780
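/*
 * Redirect @bio's completion to end_workqueue_bio(), saving the original
 * bi_end_io/bi_private in a btrfs_end_io_wq so the deferred completion
 * work can invoke them later from task context.
 */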
781 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
782                         enum btrfs_wq_endio_type metadata)
783 {
784         struct btrfs_end_io_wq *end_io_wq;
785
786         end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
787         if (!end_io_wq)
788                 return -ENOMEM;
789
790         end_io_wq->private = bio->bi_private;
791         end_io_wq->end_io = bio->bi_end_io;
792         end_io_wq->info = info;
793         end_io_wq->error = 0;
794         end_io_wq->bio = bio;
795         end_io_wq->metadata = metadata;
796
797         bio->bi_private = end_io_wq;
798         bio->bi_end_io = end_workqueue_bio;
799         return 0;
800 }
801
802 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
803 {
804         unsigned long limit = min_t(unsigned long,
805                                     info->thread_pool_size,
806                                     info->fs_devices->open_devices);
807         return 256 * limit;
808 }
809
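/*
 * Worker callbacks for async bio submission: run_one_async_start() calls
 * the submit_bio_start hook (checksumming) and records any error,
 * run_one_async_done() throttles the async submit counter and either ends
 * the bio on error or calls submit_bio_done, and run_one_async_free()
 * releases the async_submit_bio.
 */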
810 static void run_one_async_start(struct btrfs_work *work)
811 {
812         struct async_submit_bio *async;
813         int ret;
814
815         async = container_of(work, struct  async_submit_bio, work);
816         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
817                                       async->mirror_num, async->bio_flags,
818                                       async->bio_offset);
819         if (ret)
820                 async->error = ret;
821 }
822
823 static void run_one_async_done(struct btrfs_work *work)
824 {
825         struct btrfs_fs_info *fs_info;
826         struct async_submit_bio *async;
827         int limit;
828
829         async = container_of(work, struct  async_submit_bio, work);
830         fs_info = BTRFS_I(async->inode)->root->fs_info;
831
832         limit = btrfs_async_submit_limit(fs_info);
833         limit = limit * 2 / 3;
834
835         /*
836          * atomic_dec_return implies a barrier for waitqueue_active
837          */
838         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
839             waitqueue_active(&fs_info->async_submit_wait))
840                 wake_up(&fs_info->async_submit_wait);
841
842         /* If an error occurred we just want to clean up the bio and move on */
843         if (async->error) {
844                 async->bio->bi_error = async->error;
845                 bio_endio(async->bio);
846                 return;
847         }
848
849         async->submit_bio_done(async->inode, async->rw, async->bio,
850                                async->mirror_num, async->bio_flags,
851                                async->bio_offset);
852 }
853
854 static void run_one_async_free(struct btrfs_work *work)
855 {
856         struct async_submit_bio *async;
857
858         async = container_of(work, struct  async_submit_bio, work);
859         kfree(async);
860 }
861
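/*
 * Queue a bio for asynchronous submission on fs_info->workers.  The
 * start/done hooks run from the worker thread; REQ_SYNC bios are queued at
 * high priority, and submitters are throttled while async_submit_draining
 * is set.
 */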
862 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
863                         int rw, struct bio *bio, int mirror_num,
864                         unsigned long bio_flags,
865                         u64 bio_offset,
866                         extent_submit_bio_hook_t *submit_bio_start,
867                         extent_submit_bio_hook_t *submit_bio_done)
868 {
869         struct async_submit_bio *async;
870
871         async = kmalloc(sizeof(*async), GFP_NOFS);
872         if (!async)
873                 return -ENOMEM;
874
875         async->inode = inode;
876         async->rw = rw;
877         async->bio = bio;
878         async->mirror_num = mirror_num;
879         async->submit_bio_start = submit_bio_start;
880         async->submit_bio_done = submit_bio_done;
881
882         btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
883                         run_one_async_done, run_one_async_free);
884
885         async->bio_flags = bio_flags;
886         async->bio_offset = bio_offset;
887
888         async->error = 0;
889
890         atomic_inc(&fs_info->nr_async_submits);
891
892         if (rw & REQ_SYNC)
893                 btrfs_set_work_high_priority(&async->work);
894
895         btrfs_queue_work(fs_info->workers, &async->work);
896
897         while (atomic_read(&fs_info->async_submit_draining) &&
898               atomic_read(&fs_info->nr_async_submits)) {
899                 wait_event(fs_info->async_submit_wait,
900                            (atomic_read(&fs_info->nr_async_submits) == 0));
901         }
902
903         return 0;
904 }
905
906 static int btree_csum_one_bio(struct bio *bio)
907 {
908         struct bio_vec *bvec;
909         struct btrfs_root *root;
910         int i, ret = 0;
911
912         bio_for_each_segment_all(bvec, bio, i) {
913                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
914                 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
915                 if (ret)
916                         break;
917         }
918
919         return ret;
920 }
921
922 static int __btree_submit_bio_start(struct inode *inode, int rw,
923                                     struct bio *bio, int mirror_num,
924                                     unsigned long bio_flags,
925                                     u64 bio_offset)
926 {
927         /*
928          * when we're called for a write, we're already in the async
929          * submission context.  Just csum the bio; __btree_submit_bio_done maps it
930          */
931         return btree_csum_one_bio(bio);
932 }
933
934 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
935                                  int mirror_num, unsigned long bio_flags,
936                                  u64 bio_offset)
937 {
938         int ret;
939
940         /*
941          * when we're called for a write, we're already in the async
942          * submission context.  Just jump into btrfs_map_bio
943          */
944         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
945         if (ret) {
946                 bio->bi_error = ret;
947                 bio_endio(bio);
948         }
949         return ret;
950 }
951
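/*
 * Decide whether a btree write should be checksummed asynchronously.
 * Tree-log bios are checksummed inline, as are all writes when the CPU has
 * hardware CRC32c support.
 */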
952 static int check_async_write(struct inode *inode, unsigned long bio_flags)
953 {
954         if (bio_flags & EXTENT_BIO_TREE_LOG)
955                 return 0;
956 #ifdef CONFIG_X86
957         if (static_cpu_has(X86_FEATURE_XMM4_2))
958                 return 0;
959 #endif
960         return 1;
961 }
962
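/*
 * Main submit hook for the btree inode: reads get an endio workqueue for
 * checksum verification, writes are either checksummed inline or handed to
 * the async submit workers before going through btrfs_map_bio().
 */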
963 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
964                                  int mirror_num, unsigned long bio_flags,
965                                  u64 bio_offset)
966 {
967         int async = check_async_write(inode, bio_flags);
968         int ret;
969
970         if (!(rw & REQ_WRITE)) {
971                 /*
972                  * called for a read, do the setup so that checksum validation
973                  * can happen in the async kernel threads
974                  */
975                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
976                                           bio, BTRFS_WQ_ENDIO_METADATA);
977                 if (ret)
978                         goto out_w_error;
979                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
980                                     mirror_num, 0);
981         } else if (!async) {
982                 ret = btree_csum_one_bio(bio);
983                 if (ret)
984                         goto out_w_error;
985                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
986                                     mirror_num, 0);
987         } else {
988                 /*
989                  * kthread helpers are used to submit writes so that
990                  * checksumming can happen in parallel across all CPUs
991                  */
992                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
993                                           inode, rw, bio, mirror_num, 0,
994                                           bio_offset,
995                                           __btree_submit_bio_start,
996                                           __btree_submit_bio_done);
997         }
998
999         if (ret)
1000                 goto out_w_error;
1001         return 0;
1002
1003 out_w_error:
1004         bio->bi_error = ret;
1005         bio_endio(bio);
1006         return ret;
1007 }
1008
1009 #ifdef CONFIG_MIGRATION
1010 static int btree_migratepage(struct address_space *mapping,
1011                         struct page *newpage, struct page *page,
1012                         enum migrate_mode mode)
1013 {
1014         /*
1015          * we can't safely write a btree page from here,
1016          * we haven't done the locking hook
1017          */
1018         if (PageDirty(page))
1019                 return -EAGAIN;
1020         /*
1021          * Buffers may be managed in a filesystem specific way.
1022          * We must have no buffers or drop them.
1023          */
1024         if (page_has_private(page) &&
1025             !try_to_release_page(page, GFP_KERNEL))
1026                 return -EAGAIN;
1027         return migrate_page(mapping, newpage, page, mode);
1028 }
1029 #endif
1030
1031
1032 static int btree_writepages(struct address_space *mapping,
1033                             struct writeback_control *wbc)
1034 {
1035         struct btrfs_fs_info *fs_info;
1036         int ret;
1037
1038         if (wbc->sync_mode == WB_SYNC_NONE) {
1039
1040                 if (wbc->for_kupdate)
1041                         return 0;
1042
1043                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
1044                 /* this is a bit racy, but that's ok */
1045                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
1046                                              BTRFS_DIRTY_METADATA_THRESH);
1047                 if (ret < 0)
1048                         return 0;
1049         }
1050         return btree_write_cache_pages(mapping, wbc);
1051 }
1052
1053 static int btree_readpage(struct file *file, struct page *page)
1054 {
1055         struct extent_io_tree *tree;
1056         tree = &BTRFS_I(page->mapping->host)->io_tree;
1057         return extent_read_full_page(tree, page, btree_get_extent, 0);
1058 }
1059
1060 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
1061 {
1062         if (PageWriteback(page) || PageDirty(page))
1063                 return 0;
1064
1065         return try_release_extent_buffer(page);
1066 }
1067
1068 static void btree_invalidatepage(struct page *page, unsigned int offset,
1069                                  unsigned int length)
1070 {
1071         struct extent_io_tree *tree;
1072         tree = &BTRFS_I(page->mapping->host)->io_tree;
1073         extent_invalidatepage(tree, page, offset);
1074         btree_releasepage(page, GFP_NOFS);
1075         if (PagePrivate(page)) {
1076                 btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
1077                            "page private not zero on page %llu",
1078                            (unsigned long long)page_offset(page));
1079                 ClearPagePrivate(page);
1080                 set_page_private(page, 0);
1081                 put_page(page);
1082         }
1083 }
1084
1085 static int btree_set_page_dirty(struct page *page)
1086 {
1087 #ifdef DEBUG
1088         struct extent_buffer *eb;
1089
1090         BUG_ON(!PagePrivate(page));
1091         eb = (struct extent_buffer *)page->private;
1092         BUG_ON(!eb);
1093         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1094         BUG_ON(!atomic_read(&eb->refs));
1095         btrfs_assert_tree_locked(eb);
1096 #endif
1097         return __set_page_dirty_nobuffers(page);
1098 }
1099
1100 static const struct address_space_operations btree_aops = {
1101         .readpage       = btree_readpage,
1102         .writepages     = btree_writepages,
1103         .releasepage    = btree_releasepage,
1104         .invalidatepage = btree_invalidatepage,
1105 #ifdef CONFIG_MIGRATION
1106         .migratepage    = btree_migratepage,
1107 #endif
1108         .set_page_dirty = btree_set_page_dirty,
1109 };
1110
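/*
 * Kick off a non-blocking read of the tree block at @bytenr to warm the
 * cache; the result is not waited for and errors are ignored.
 */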
1111 void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
1112 {
1113         struct extent_buffer *buf = NULL;
1114         struct inode *btree_inode = root->fs_info->btree_inode;
1115
1116         buf = btrfs_find_create_tree_block(root, bytenr);
1117         if (IS_ERR(buf))
1118                 return;
1119         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1120                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1121         free_extent_buffer(buf);
1122 }
1123
1124 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
1125                          int mirror_num, struct extent_buffer **eb)
1126 {
1127         struct extent_buffer *buf = NULL;
1128         struct inode *btree_inode = root->fs_info->btree_inode;
1129         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1130         int ret;
1131
1132         buf = btrfs_find_create_tree_block(root, bytenr);
1133         if (IS_ERR(buf))
1134                 return 0;
1135
1136         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1137
1138         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1139                                        btree_get_extent, mirror_num);
1140         if (ret) {
1141                 free_extent_buffer(buf);
1142                 return ret;
1143         }
1144
1145         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1146                 free_extent_buffer(buf);
1147                 return -EIO;
1148         } else if (extent_buffer_uptodate(buf)) {
1149                 *eb = buf;
1150         } else {
1151                 free_extent_buffer(buf);
1152         }
1153         return 0;
1154 }
1155
1156 struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
1157                                             u64 bytenr)
1158 {
1159         return find_extent_buffer(fs_info, bytenr);
1160 }
1161
1162 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1163                                                  u64 bytenr)
1164 {
1165         if (btrfs_is_testing(root->fs_info))
1166                 return alloc_test_extent_buffer(root->fs_info, bytenr,
1167                                 root->nodesize);
1168         return alloc_extent_buffer(root->fs_info, bytenr);
1169 }
1170
1171
1172 int btrfs_write_tree_block(struct extent_buffer *buf)
1173 {
1174         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1175                                         buf->start + buf->len - 1);
1176 }
1177
1178 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1179 {
1180         return filemap_fdatawait_range(buf->pages[0]->mapping,
1181                                        buf->start, buf->start + buf->len - 1);
1182 }
1183
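/*
 * Read the tree block at @bytenr and verify it against @parent_transid,
 * retrying other mirrors as needed.  Returns the extent buffer or an
 * ERR_PTR on failure.
 */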
1184 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1185                                       u64 parent_transid)
1186 {
1187         struct extent_buffer *buf = NULL;
1188         int ret;
1189
1190         buf = btrfs_find_create_tree_block(root, bytenr);
1191         if (IS_ERR(buf))
1192                 return buf;
1193
1194         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1195         if (ret) {
1196                 free_extent_buffer(buf);
1197                 return ERR_PTR(ret);
1198         }
1199         return buf;
1200
1201 }
1202
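/*
 * If @buf was dirtied in the currently running transaction, clear its
 * dirty bit and subtract its size from the dirty metadata accounting.
 * Blocks dirtied in older transactions are left alone.
 */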
1203 void clean_tree_block(struct btrfs_trans_handle *trans,
1204                       struct btrfs_fs_info *fs_info,
1205                       struct extent_buffer *buf)
1206 {
1207         if (btrfs_header_generation(buf) ==
1208             fs_info->running_transaction->transid) {
1209                 btrfs_assert_tree_locked(buf);
1210
1211                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1212                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1213                                              -buf->len,
1214                                              fs_info->dirty_metadata_batch);
1215                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1216                         btrfs_set_lock_blocking(buf);
1217                         clear_extent_buffer_dirty(buf);
1218                 }
1219         }
1220 }
1221
1222 static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1223 {
1224         struct btrfs_subvolume_writers *writers;
1225         int ret;
1226
1227         writers = kmalloc(sizeof(*writers), GFP_NOFS);
1228         if (!writers)
1229                 return ERR_PTR(-ENOMEM);
1230
1231         ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
1232         if (ret < 0) {
1233                 kfree(writers);
1234                 return ERR_PTR(ret);
1235         }
1236
1237         init_waitqueue_head(&writers->wait);
1238         return writers;
1239 }
1240
1241 static void
1242 btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1243 {
1244         percpu_counter_destroy(&writers->counter);
1245         kfree(writers);
1246 }
1247
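/*
 * Initialize the in-memory fields of a freshly allocated btrfs_root:
 * geometry (node/sector/stripe size), list heads, locks, waitqueues and
 * counters.  Nothing is read from disk here.
 */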
1248 static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
1249                          struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1250                          u64 objectid)
1251 {
1252         bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1253         root->node = NULL;
1254         root->commit_root = NULL;
1255         root->sectorsize = sectorsize;
1256         root->nodesize = nodesize;
1257         root->stripesize = stripesize;
1258         root->state = 0;
1259         root->orphan_cleanup_state = 0;
1260
1261         root->objectid = objectid;
1262         root->last_trans = 0;
1263         root->highest_objectid = 0;
1264         root->nr_delalloc_inodes = 0;
1265         root->nr_ordered_extents = 0;
1266         root->name = NULL;
1267         root->inode_tree = RB_ROOT;
1268         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1269         root->block_rsv = NULL;
1270         root->orphan_block_rsv = NULL;
1271
1272         INIT_LIST_HEAD(&root->dirty_list);
1273         INIT_LIST_HEAD(&root->root_list);
1274         INIT_LIST_HEAD(&root->delalloc_inodes);
1275         INIT_LIST_HEAD(&root->delalloc_root);
1276         INIT_LIST_HEAD(&root->ordered_extents);
1277         INIT_LIST_HEAD(&root->ordered_root);
1278         INIT_LIST_HEAD(&root->logged_list[0]);
1279         INIT_LIST_HEAD(&root->logged_list[1]);
1280         spin_lock_init(&root->orphan_lock);
1281         spin_lock_init(&root->inode_lock);
1282         spin_lock_init(&root->delalloc_lock);
1283         spin_lock_init(&root->ordered_extent_lock);
1284         spin_lock_init(&root->accounting_lock);
1285         spin_lock_init(&root->log_extents_lock[0]);
1286         spin_lock_init(&root->log_extents_lock[1]);
1287         mutex_init(&root->objectid_mutex);
1288         mutex_init(&root->log_mutex);
1289         mutex_init(&root->ordered_extent_mutex);
1290         mutex_init(&root->delalloc_mutex);
1291         init_waitqueue_head(&root->log_writer_wait);
1292         init_waitqueue_head(&root->log_commit_wait[0]);
1293         init_waitqueue_head(&root->log_commit_wait[1]);
1294         INIT_LIST_HEAD(&root->log_ctxs[0]);
1295         INIT_LIST_HEAD(&root->log_ctxs[1]);
1296         atomic_set(&root->log_commit[0], 0);
1297         atomic_set(&root->log_commit[1], 0);
1298         atomic_set(&root->log_writers, 0);
1299         atomic_set(&root->log_batch, 0);
1300         atomic_set(&root->orphan_inodes, 0);
1301         atomic_set(&root->refs, 1);
1302         atomic_set(&root->will_be_snapshoted, 0);
1303         atomic_set(&root->qgroup_meta_rsv, 0);
1304         root->log_transid = 0;
1305         root->log_transid_committed = -1;
1306         root->last_log_commit = 0;
1307         if (!dummy)
1308                 extent_io_tree_init(&root->dirty_log_pages,
1309                                      fs_info->btree_inode->i_mapping);
1310
1311         memset(&root->root_key, 0, sizeof(root->root_key));
1312         memset(&root->root_item, 0, sizeof(root->root_item));
1313         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1314         if (!dummy)
1315                 root->defrag_trans_start = fs_info->generation;
1316         else
1317                 root->defrag_trans_start = 0;
1318         root->root_key.objectid = objectid;
1319         root->anon_dev = 0;
1320
1321         spin_lock_init(&root->root_item_lock);
1322 }
1323
1324 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1325                 gfp_t flags)
1326 {
1327         struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1328         if (root)
1329                 root->fs_info = fs_info;
1330         return root;
1331 }
1332
1333 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1334 /* Should only be used by the testing infrastructure */
1335 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
1336                                           u32 sectorsize, u32 nodesize)
1337 {
1338         struct btrfs_root *root;
1339
1340         if (!fs_info)
1341                 return ERR_PTR(-EINVAL);
1342
1343         root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1344         if (!root)
1345                 return ERR_PTR(-ENOMEM);
1346         /* We don't use the stripesize in selftest, so set it to sectorsize */
1347         __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
1348                         BTRFS_ROOT_TREE_OBJECTID);
1349         root->alloc_bytenr = 0;
1350
1351         return root;
1352 }
1353 #endif
1354
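/*
 * Create a brand new tree root: allocate a single leaf, fill in its header
 * and the root item, and insert the root item into the tree of tree roots.
 */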
1355 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1356                                      struct btrfs_fs_info *fs_info,
1357                                      u64 objectid)
1358 {
1359         struct extent_buffer *leaf;
1360         struct btrfs_root *tree_root = fs_info->tree_root;
1361         struct btrfs_root *root;
1362         struct btrfs_key key;
1363         int ret = 0;
1364         uuid_le uuid;
1365
1366         root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1367         if (!root)
1368                 return ERR_PTR(-ENOMEM);
1369
1370         __setup_root(tree_root->nodesize, tree_root->sectorsize,
1371                 tree_root->stripesize, root, fs_info, objectid);
1372         root->root_key.objectid = objectid;
1373         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1374         root->root_key.offset = 0;
1375
1376         leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1377         if (IS_ERR(leaf)) {
1378                 ret = PTR_ERR(leaf);
1379                 leaf = NULL;
1380                 goto fail;
1381         }
1382
1383         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1384         btrfs_set_header_bytenr(leaf, leaf->start);
1385         btrfs_set_header_generation(leaf, trans->transid);
1386         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1387         btrfs_set_header_owner(leaf, objectid);
1388         root->node = leaf;
1389
1390         write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
1391                             BTRFS_FSID_SIZE);
1392         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1393                             btrfs_header_chunk_tree_uuid(leaf),
1394                             BTRFS_UUID_SIZE);
1395         btrfs_mark_buffer_dirty(leaf);
1396
1397         root->commit_root = btrfs_root_node(root);
1398         set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1399
1400         root->root_item.flags = 0;
1401         root->root_item.byte_limit = 0;
1402         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1403         btrfs_set_root_generation(&root->root_item, trans->transid);
1404         btrfs_set_root_level(&root->root_item, 0);
1405         btrfs_set_root_refs(&root->root_item, 1);
1406         btrfs_set_root_used(&root->root_item, leaf->len);
1407         btrfs_set_root_last_snapshot(&root->root_item, 0);
1408         btrfs_set_root_dirid(&root->root_item, 0);
1409         uuid_le_gen(&uuid);
1410         memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1411         root->root_item.drop_level = 0;
1412
1413         key.objectid = objectid;
1414         key.type = BTRFS_ROOT_ITEM_KEY;
1415         key.offset = 0;
1416         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1417         if (ret)
1418                 goto fail;
1419
1420         btrfs_tree_unlock(leaf);
1421
1422         return root;
1423
1424 fail:
1425         if (leaf) {
1426                 btrfs_tree_unlock(leaf);
1427                 free_extent_buffer(root->commit_root);
1428                 free_extent_buffer(leaf);
1429         }
1430         kfree(root);
1431
1432         return ERR_PTR(ret);
1433 }
1434
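/*
 * Allocate an in-memory log tree root backed by a single empty leaf.  The
 * root item is not inserted into the tree of tree roots here; log trees are
 * transient and go away before a real commit is done.
 */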
1435 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1436                                          struct btrfs_fs_info *fs_info)
1437 {
1438         struct btrfs_root *root;
1439         struct btrfs_root *tree_root = fs_info->tree_root;
1440         struct extent_buffer *leaf;
1441
1442         root = btrfs_alloc_root(fs_info, GFP_NOFS);
1443         if (!root)
1444                 return ERR_PTR(-ENOMEM);
1445
1446         __setup_root(tree_root->nodesize, tree_root->sectorsize,
1447                      tree_root->stripesize, root, fs_info,
1448                      BTRFS_TREE_LOG_OBJECTID);
1449
1450         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1451         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1452         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1453
1454         /*
1455          * DON'T set REF_COWS for log trees
1456          *
1457          * log trees do not get reference counted because they go away
1458          * before a real commit is actually done.  They do store pointers
1459          * to file data extents, and those reference counts still get
1460          * updated (along with back refs to the log tree).
1461          */
1462
1463         leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1464                         NULL, 0, 0, 0);
1465         if (IS_ERR(leaf)) {
1466                 kfree(root);
1467                 return ERR_CAST(leaf);
1468         }
1469
1470         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1471         btrfs_set_header_bytenr(leaf, leaf->start);
1472         btrfs_set_header_generation(leaf, trans->transid);
1473         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1474         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1475         root->node = leaf;
1476
1477         write_extent_buffer(root->node, root->fs_info->fsid,
1478                             btrfs_header_fsid(), BTRFS_FSID_SIZE);
1479         btrfs_mark_buffer_dirty(root->node);
1480         btrfs_tree_unlock(root->node);
1481         return root;
1482 }
1483
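/* allocate the fs-wide log root tree and store it in fs_info->log_root_tree */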
1484 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1485                              struct btrfs_fs_info *fs_info)
1486 {
1487         struct btrfs_root *log_root;
1488
1489         log_root = alloc_log_tree(trans, fs_info);
1490         if (IS_ERR(log_root))
1491                 return PTR_ERR(log_root);
1492         WARN_ON(fs_info->log_root_tree);
1493         fs_info->log_root_tree = log_root;
1494         return 0;
1495 }
1496
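/*
 * Create the log tree for a subvolume: point its root key offset at the
 * subvolume's objectid, fill in the root item's inode item and attach it
 * to root->log_root.
 */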
1497 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1498                        struct btrfs_root *root)
1499 {
1500         struct btrfs_root *log_root;
1501         struct btrfs_inode_item *inode_item;
1502
1503         log_root = alloc_log_tree(trans, root->fs_info);
1504         if (IS_ERR(log_root))
1505                 return PTR_ERR(log_root);
1506
1507         log_root->last_trans = trans->transid;
1508         log_root->root_key.offset = root->root_key.objectid;
1509
1510         inode_item = &log_root->root_item.inode;
1511         btrfs_set_stack_inode_generation(inode_item, 1);
1512         btrfs_set_stack_inode_size(inode_item, 3);
1513         btrfs_set_stack_inode_nlink(inode_item, 1);
1514         btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
1515         btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1516
1517         btrfs_set_root_node(&log_root->root_item, log_root->node);
1518
1519         WARN_ON(root->log_root);
1520         root->log_root = log_root;
1521         root->log_transid = 0;
1522         root->log_transid_committed = -1;
1523         root->last_log_commit = 0;
1524         return 0;
1525 }
1526
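/*
 * Read one root from the tree root: look up its root item, read the tree
 * block it points at and fill in the in-memory btrfs_root.
 */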
1527 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1528                                                struct btrfs_key *key)
1529 {
1530         struct btrfs_root *root;
1531         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1532         struct btrfs_path *path;
1533         u64 generation;
1534         int ret;
1535
1536         path = btrfs_alloc_path();
1537         if (!path)
1538                 return ERR_PTR(-ENOMEM);
1539
1540         root = btrfs_alloc_root(fs_info, GFP_NOFS);
1541         if (!root) {
1542                 ret = -ENOMEM;
1543                 goto alloc_fail;
1544         }
1545
1546         __setup_root(tree_root->nodesize, tree_root->sectorsize,
1547                 tree_root->stripesize, root, fs_info, key->objectid);
1548
1549         ret = btrfs_find_root(tree_root, key, path,
1550                               &root->root_item, &root->root_key);
1551         if (ret) {
1552                 if (ret > 0)
1553                         ret = -ENOENT;
1554                 goto find_fail;
1555         }
1556
1557         generation = btrfs_root_generation(&root->root_item);
1558         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1559                                      generation);
1560         if (IS_ERR(root->node)) {
1561                 ret = PTR_ERR(root->node);
1562                 goto find_fail;
1563         } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1564                 ret = -EIO;
1565                 free_extent_buffer(root->node);
1566                 goto find_fail;
1567         }
1568         root->commit_root = btrfs_root_node(root);
1569 out:
1570         btrfs_free_path(path);
1571         return root;
1572
1573 find_fail:
1574         kfree(root);
1575 alloc_fail:
1576         root = ERR_PTR(ret);
1577         goto out;
1578 }
1579
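/*
 * Like btrfs_read_tree_root(), but non-log roots are additionally marked
 * as reference counted (REF_COWS) and get their root item checked and
 * initialized.
 */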
1580 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1581                                       struct btrfs_key *location)
1582 {
1583         struct btrfs_root *root;
1584
1585         root = btrfs_read_tree_root(tree_root, location);
1586         if (IS_ERR(root))
1587                 return root;
1588
1589         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1590                 set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1591                 btrfs_check_and_init_root_item(&root->root_item);
1592         }
1593
1594         return root;
1595 }
1596
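/*
 * Set up the runtime state of a subvolume root: the free inode caches,
 * the subvolume writers structure, an anonymous bdev for the subvolume
 * and the highest objectid currently in use.
 */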
1597 int btrfs_init_fs_root(struct btrfs_root *root)
1598 {
1599         int ret;
1600         struct btrfs_subvolume_writers *writers;
1601
1602         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1603         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1604                                         GFP_NOFS);
1605         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1606                 ret = -ENOMEM;
1607                 goto fail;
1608         }
1609
1610         writers = btrfs_alloc_subvolume_writers();
1611         if (IS_ERR(writers)) {
1612                 ret = PTR_ERR(writers);
1613                 goto fail;
1614         }
1615         root->subv_writers = writers;
1616
1617         btrfs_init_free_ino_ctl(root);
1618         spin_lock_init(&root->ino_cache_lock);
1619         init_waitqueue_head(&root->ino_cache_wait);
1620
1621         ret = get_anon_bdev(&root->anon_dev);
1622         if (ret)
1623                 goto fail;
1624
1625         mutex_lock(&root->objectid_mutex);
1626         ret = btrfs_find_highest_objectid(root,
1627                                         &root->highest_objectid);
1628         if (ret) {
1629                 mutex_unlock(&root->objectid_mutex);
1630                 goto fail;
1631         }
1632
1633         ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1634
1635         mutex_unlock(&root->objectid_mutex);
1636
1637         return 0;
1638 fail:
1639         /* the caller is responsible for calling free_fs_root */
1640         return ret;
1641 }
1642
1643 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1644                                         u64 root_id)
1645 {
1646         struct btrfs_root *root;
1647
1648         spin_lock(&fs_info->fs_roots_radix_lock);
1649         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1650                                  (unsigned long)root_id);
1651         spin_unlock(&fs_info->fs_roots_radix_lock);
1652         return root;
1653 }
1654
1655 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1656                          struct btrfs_root *root)
1657 {
1658         int ret;
1659
1660         ret = radix_tree_preload(GFP_NOFS);
1661         if (ret)
1662                 return ret;
1663
1664         spin_lock(&fs_info->fs_roots_radix_lock);
1665         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1666                                 (unsigned long)root->root_key.objectid,
1667                                 root);
1668         if (ret == 0)
1669                 set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1670         spin_unlock(&fs_info->fs_roots_radix_lock);
1671         radix_tree_preload_end();
1672
1673         return ret;
1674 }
1675
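/*
 * Main root lookup by key.  Roots held directly in fs_info are returned
 * as-is; anything else is looked up in the radix tree and, on a miss,
 * read from disk, initialized and inserted.  With check_ref set, roots
 * whose refcount has dropped to zero return -ENOENT.
 */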
1676 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1677                                      struct btrfs_key *location,
1678                                      bool check_ref)
1679 {
1680         struct btrfs_root *root;
1681         struct btrfs_path *path;
1682         struct btrfs_key key;
1683         int ret;
1684
1685         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1686                 return fs_info->tree_root;
1687         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1688                 return fs_info->extent_root;
1689         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1690                 return fs_info->chunk_root;
1691         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1692                 return fs_info->dev_root;
1693         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1694                 return fs_info->csum_root;
1695         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1696                 return fs_info->quota_root ? fs_info->quota_root :
1697                                              ERR_PTR(-ENOENT);
1698         if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1699                 return fs_info->uuid_root ? fs_info->uuid_root :
1700                                             ERR_PTR(-ENOENT);
1701         if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1702                 return fs_info->free_space_root ? fs_info->free_space_root :
1703                                                   ERR_PTR(-ENOENT);
1704 again:
1705         root = btrfs_lookup_fs_root(fs_info, location->objectid);
1706         if (root) {
1707                 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1708                         return ERR_PTR(-ENOENT);
1709                 return root;
1710         }
1711
1712         root = btrfs_read_fs_root(fs_info->tree_root, location);
1713         if (IS_ERR(root))
1714                 return root;
1715
1716         if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1717                 ret = -ENOENT;
1718                 goto fail;
1719         }
1720
1721         ret = btrfs_init_fs_root(root);
1722         if (ret)
1723                 goto fail;
1724
1725         path = btrfs_alloc_path();
1726         if (!path) {
1727                 ret = -ENOMEM;
1728                 goto fail;
1729         }
1730         key.objectid = BTRFS_ORPHAN_OBJECTID;
1731         key.type = BTRFS_ORPHAN_ITEM_KEY;
1732         key.offset = location->objectid;
1733
1734         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1735         btrfs_free_path(path);
1736         if (ret < 0)
1737                 goto fail;
1738         if (ret == 0)
1739                 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1740
1741         ret = btrfs_insert_fs_root(fs_info, root);
1742         if (ret) {
1743                 if (ret == -EEXIST) {
1744                         free_fs_root(root);
1745                         goto again;
1746                 }
1747                 goto fail;
1748         }
1749         return root;
1750 fail:
1751         free_fs_root(root);
1752         return ERR_PTR(ret);
1753 }
1754
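/*
 * bdi congestion callback: report congestion if any of the filesystem's
 * devices has a congested backing device.
 */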
1755 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1756 {
1757         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1758         int ret = 0;
1759         struct btrfs_device *device;
1760         struct backing_dev_info *bdi;
1761
1762         rcu_read_lock();
1763         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1764                 if (!device->bdev)
1765                         continue;
1766                 bdi = blk_get_backing_dev_info(device->bdev);
1767                 if (bdi_congested(bdi, bdi_bits)) {
1768                         ret = 1;
1769                         break;
1770                 }
1771         }
1772         rcu_read_unlock();
1773         return ret;
1774 }
1775
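/* register the btrfs backing_dev_info and wire up the congestion callback */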
1776 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1777 {
1778         int err;
1779
1780         err = bdi_setup_and_register(bdi, "btrfs");
1781         if (err)
1782                 return err;
1783
1784         bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
1785         bdi->congested_fn       = btrfs_congested_fn;
1786         bdi->congested_data     = info;
1787         bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
1788         return 0;
1789 }
1790
1791 /*
1792  * called by the kthread helper functions to finally call the bio end_io
1793  * functions.  This is where read checksum verification actually happens
1794  */
1795 static void end_workqueue_fn(struct btrfs_work *work)
1796 {
1797         struct bio *bio;
1798         struct btrfs_end_io_wq *end_io_wq;
1799
1800         end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1801         bio = end_io_wq->bio;
1802
1803         bio->bi_error = end_io_wq->error;
1804         bio->bi_private = end_io_wq->private;
1805         bio->bi_end_io = end_io_wq->end_io;
1806         kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1807         bio_endio(bio);
1808 }
1809
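/*
 * Background cleaner thread: runs delayed iputs, drops one deleted
 * snapshot per loop, defrags inodes and deletes unused block groups,
 * going back to sleep whenever btrfs_need_cleaner_sleep() asks it to.
 */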
1810 static int cleaner_kthread(void *arg)
1811 {
1812         struct btrfs_root *root = arg;
1813         int again;
1814         struct btrfs_trans_handle *trans;
1815
1816         do {
1817                 again = 0;
1818
1819                 /* Make the cleaner go to sleep early. */
1820                 if (btrfs_need_cleaner_sleep(root))
1821                         goto sleep;
1822
1823                 /*
1824                  * Do not do anything if we might cause open_ctree() to block
1825                  * before we have finished mounting the filesystem.
1826                  */
1827                 if (!root->fs_info->open)
1828                         goto sleep;
1829
1830                 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1831                         goto sleep;
1832
1833                 /*
1834                  * Avoid the case where the fs status changed between the
1835                  * above check and the trylock.
1836                  */
1837                 if (btrfs_need_cleaner_sleep(root)) {
1838                         mutex_unlock(&root->fs_info->cleaner_mutex);
1839                         goto sleep;
1840                 }
1841
1842                 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
1843                 btrfs_run_delayed_iputs(root);
1844                 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
1845
1846                 again = btrfs_clean_one_deleted_snapshot(root);
1847                 mutex_unlock(&root->fs_info->cleaner_mutex);
1848
1849                 /*
1850                  * The defragger has dealt with the R/O remount and umount,
1851                  * so we needn't do anything special here.
1852                  */
1853                 btrfs_run_defrag_inodes(root->fs_info);
1854
1855                 /*
1856                  * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1857                  * with relocation (btrfs_relocate_chunk) and relocation
1858                  * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1859                  * after acquiring fs_info->delete_unused_bgs_mutex. So we
1860                  * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1861                  * unused block groups.
1862                  */
1863                 btrfs_delete_unused_bgs(root->fs_info);
1864 sleep:
1865                 if (!again) {
1866                         set_current_state(TASK_INTERRUPTIBLE);
1867                         if (!kthread_should_stop())
1868                                 schedule();
1869                         __set_current_state(TASK_RUNNING);
1870                 }
1871         } while (!kthread_should_stop());
1872
1873         /*
1874          * Transaction kthread is stopped before us and wakes us up.
1875          * However we might have started a new transaction and COWed some
1876          * tree blocks when deleting unused block groups for example. So
1877          * make sure we commit the transaction we started to have a clean
1878          * shutdown when evicting the btree inode - if it has dirty pages
1879          * when we do the final iput() on it, eviction will trigger a
1880          * writeback for it which will fail with null pointer dereferences
1881          * since work queues and other resources were already released and
1882          * destroyed by the time the iput/eviction/writeback is made.
1883          */
1884         trans = btrfs_attach_transaction(root);
1885         if (IS_ERR(trans)) {
1886                 if (PTR_ERR(trans) != -ENOENT)
1887                         btrfs_err(root->fs_info,
1888                                   "cleaner transaction attach returned %ld",
1889                                   PTR_ERR(trans));
1890         } else {
1891                 int ret;
1892
1893                 ret = btrfs_commit_transaction(trans, root);
1894                 if (ret)
1895                         btrfs_err(root->fs_info,
1896                                   "cleaner open transaction commit returned %d",
1897                                   ret);
1898         }
1899
1900         return 0;
1901 }
1902
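/*
 * Background transaction thread: commits the running transaction once it
 * is blocked or older than the commit interval, then wakes the cleaner.
 */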
1903 static int transaction_kthread(void *arg)
1904 {
1905         struct btrfs_root *root = arg;
1906         struct btrfs_trans_handle *trans;
1907         struct btrfs_transaction *cur;
1908         u64 transid;
1909         unsigned long now;
1910         unsigned long delay;
1911         bool cannot_commit;
1912
1913         do {
1914                 cannot_commit = false;
1915                 delay = HZ * root->fs_info->commit_interval;
1916                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1917
1918                 spin_lock(&root->fs_info->trans_lock);
1919                 cur = root->fs_info->running_transaction;
1920                 if (!cur) {
1921                         spin_unlock(&root->fs_info->trans_lock);
1922                         goto sleep;
1923                 }
1924
1925                 now = get_seconds();
1926                 if (cur->state < TRANS_STATE_BLOCKED &&
1927                     (now < cur->start_time ||
1928                      now - cur->start_time < root->fs_info->commit_interval)) {
1929                         spin_unlock(&root->fs_info->trans_lock);
1930                         delay = HZ * 5;
1931                         goto sleep;
1932                 }
1933                 transid = cur->transid;
1934                 spin_unlock(&root->fs_info->trans_lock);
1935
1936                 /* If the file system is aborted, this will always fail. */
1937                 trans = btrfs_attach_transaction(root);
1938                 if (IS_ERR(trans)) {
1939                         if (PTR_ERR(trans) != -ENOENT)
1940                                 cannot_commit = true;
1941                         goto sleep;
1942                 }
1943                 if (transid == trans->transid) {
1944                         btrfs_commit_transaction(trans, root);
1945                 } else {
1946                         btrfs_end_transaction(trans, root);
1947                 }
1948 sleep:
1949                 wake_up_process(root->fs_info->cleaner_kthread);
1950                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1951
1952                 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1953                                       &root->fs_info->fs_state)))
1954                         btrfs_cleanup_transaction(root);
1955                 set_current_state(TASK_INTERRUPTIBLE);
1956                 if (!kthread_should_stop() &&
1957                                 (!btrfs_transaction_blocked(root->fs_info) ||
1958                                  cannot_commit))
1959                         schedule_timeout(delay);
1960                 __set_current_state(TASK_RUNNING);
1961         } while (!kthread_should_stop());
1962         return 0;
1963 }
1964
1965 /*
1966  * this will find the highest generation in the array of
1967  * root backups.  The index of the newest entry is returned,
1968  * or -1 if we can't find anything.
1969  *
1970  * We check to make sure the array is valid by comparing the
1971  * generation of the latest  root in the array with the generation
1972  * in the super block.  If they don't match we pitch it.
1973  */
1974 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1975 {
1976         u64 cur;
1977         int newest_index = -1;
1978         struct btrfs_root_backup *root_backup;
1979         int i;
1980
1981         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1982                 root_backup = info->super_copy->super_roots + i;
1983                 cur = btrfs_backup_tree_root_gen(root_backup);
1984                 if (cur == newest_gen)
1985                         newest_index = i;
1986         }
1987
1988         /* check to see if we actually wrapped around */
1989         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1990                 root_backup = info->super_copy->super_roots;
1991                 cur = btrfs_backup_tree_root_gen(root_backup);
1992                 if (cur == newest_gen)
1993                         newest_index = 0;
1994         }
1995         return newest_index;
1996 }
1997
1998
1999 /*
2000  * find the oldest backup so we know where to store new entries
2001  * in the backup array.  This will set the backup_root_index
2002  * field in the fs_info struct
2003  */
2004 static void find_oldest_super_backup(struct btrfs_fs_info *info,
2005                                      u64 newest_gen)
2006 {
2007         int newest_index = -1;
2008
2009         newest_index = find_newest_super_backup(info, newest_gen);
2010         /* if there was garbage in there, just move along */
2011         if (newest_index == -1) {
2012                 info->backup_root_index = 0;
2013         } else {
2014                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
2015         }
2016 }
2017
2018 /*
2019  * copy all the root pointers into the super backup array.
2020  * this will bump the backup pointer by one when it is
2021  * done
2022  */
2023 static void backup_super_roots(struct btrfs_fs_info *info)
2024 {
2025         int next_backup;
2026         struct btrfs_root_backup *root_backup;
2027         int last_backup;
2028
2029         next_backup = info->backup_root_index;
2030         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
2031                 BTRFS_NUM_BACKUP_ROOTS;
2032
2033         /*
2034          * just overwrite the last backup if we're at the same generation;
2035          * this happens only at umount
2036          */
2037         root_backup = info->super_for_commit->super_roots + last_backup;
2038         if (btrfs_backup_tree_root_gen(root_backup) ==
2039             btrfs_header_generation(info->tree_root->node))
2040                 next_backup = last_backup;
2041
2042         root_backup = info->super_for_commit->super_roots + next_backup;
2043
2044         /*
2045          * make sure all of our padding and empty slots get zero filled
2046          * regardless of which ones we use today
2047          */
2048         memset(root_backup, 0, sizeof(*root_backup));
2049
2050         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
2051
2052         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
2053         btrfs_set_backup_tree_root_gen(root_backup,
2054                                btrfs_header_generation(info->tree_root->node));
2055
2056         btrfs_set_backup_tree_root_level(root_backup,
2057                                btrfs_header_level(info->tree_root->node));
2058
2059         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
2060         btrfs_set_backup_chunk_root_gen(root_backup,
2061                                btrfs_header_generation(info->chunk_root->node));
2062         btrfs_set_backup_chunk_root_level(root_backup,
2063                                btrfs_header_level(info->chunk_root->node));
2064
2065         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2066         btrfs_set_backup_extent_root_gen(root_backup,
2067                                btrfs_header_generation(info->extent_root->node));
2068         btrfs_set_backup_extent_root_level(root_backup,
2069                                btrfs_header_level(info->extent_root->node));
2070
2071         /*
2072          * we might commit during log recovery, which happens before we set
2073          * the fs_root.  Make sure it is valid before we fill it in.
2074          */
2075         if (info->fs_root && info->fs_root->node) {
2076                 btrfs_set_backup_fs_root(root_backup,
2077                                          info->fs_root->node->start);
2078                 btrfs_set_backup_fs_root_gen(root_backup,
2079                                btrfs_header_generation(info->fs_root->node));
2080                 btrfs_set_backup_fs_root_level(root_backup,
2081                                btrfs_header_level(info->fs_root->node));
2082         }
2083
2084         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2085         btrfs_set_backup_dev_root_gen(root_backup,
2086                                btrfs_header_generation(info->dev_root->node));
2087         btrfs_set_backup_dev_root_level(root_backup,
2088                                        btrfs_header_level(info->dev_root->node));
2089
2090         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2091         btrfs_set_backup_csum_root_gen(root_backup,
2092                                btrfs_header_generation(info->csum_root->node));
2093         btrfs_set_backup_csum_root_level(root_backup,
2094                                btrfs_header_level(info->csum_root->node));
2095
2096         btrfs_set_backup_total_bytes(root_backup,
2097                              btrfs_super_total_bytes(info->super_copy));
2098         btrfs_set_backup_bytes_used(root_backup,
2099                              btrfs_super_bytes_used(info->super_copy));
2100         btrfs_set_backup_num_devices(root_backup,
2101                              btrfs_super_num_devices(info->super_copy));
2102
2103         /*
2104          * if we don't copy this out to the super_copy, it won't get remembered
2105          * for the next commit
2106          */
2107         memcpy(&info->super_copy->super_roots,
2108                &info->super_for_commit->super_roots,
2109                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2110 }
2111
2112 /*
2113  * this copies info out of the root backup array and back into
2114  * the in-memory super block.  It is meant to help iterate through
2115  * the array, so you send it the number of backups you've already
2116  * tried and the last backup index you used.
2117  *
2118  * this returns -1 when it has tried all the backups
2119  */
2120 static noinline int next_root_backup(struct btrfs_fs_info *info,
2121                                      struct btrfs_super_block *super,
2122                                      int *num_backups_tried, int *backup_index)
2123 {
2124         struct btrfs_root_backup *root_backup;
2125         int newest = *backup_index;
2126
2127         if (*num_backups_tried == 0) {
2128                 u64 gen = btrfs_super_generation(super);
2129
2130                 newest = find_newest_super_backup(info, gen);
2131                 if (newest == -1)
2132                         return -1;
2133
2134                 *backup_index = newest;
2135                 *num_backups_tried = 1;
2136         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2137                 /* we've tried all the backups, all done */
2138                 return -1;
2139         } else {
2140                 /* jump to the next oldest backup */
2141                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2142                         BTRFS_NUM_BACKUP_ROOTS;
2143                 *backup_index = newest;
2144                 *num_backups_tried += 1;
2145         }
2146         root_backup = super->super_roots + newest;
2147
2148         btrfs_set_super_generation(super,
2149                                    btrfs_backup_tree_root_gen(root_backup));
2150         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2151         btrfs_set_super_root_level(super,
2152                                    btrfs_backup_tree_root_level(root_backup));
2153         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2154
2155         /*
2156          * fixme: the total bytes and num_devices need to match, or we
2157          * should require a fsck
2158          */
2159         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2160         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2161         return 0;
2162 }
2163
2164 /* helper to clean up workers */
2165 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2166 {
2167         btrfs_destroy_workqueue(fs_info->fixup_workers);
2168         btrfs_destroy_workqueue(fs_info->delalloc_workers);
2169         btrfs_destroy_workqueue(fs_info->workers);
2170         btrfs_destroy_workqueue(fs_info->endio_workers);
2171         btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2172         btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2173         btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2174         btrfs_destroy_workqueue(fs_info->rmw_workers);
2175         btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2176         btrfs_destroy_workqueue(fs_info->endio_write_workers);
2177         btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2178         btrfs_destroy_workqueue(fs_info->submit_workers);
2179         btrfs_destroy_workqueue(fs_info->delayed_workers);
2180         btrfs_destroy_workqueue(fs_info->caching_workers);
2181         btrfs_destroy_workqueue(fs_info->readahead_workers);
2182         btrfs_destroy_workqueue(fs_info->flush_workers);
2183         btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2184         btrfs_destroy_workqueue(fs_info->extent_workers);
2185 }
2186
2187 static void free_root_extent_buffers(struct btrfs_root *root)
2188 {
2189         if (root) {
2190                 free_extent_buffer(root->node);
2191                 free_extent_buffer(root->commit_root);
2192                 root->node = NULL;
2193                 root->commit_root = NULL;
2194         }
2195 }
2196
2197 /* helper to clean up tree roots */
2198 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2199 {
2200         free_root_extent_buffers(info->tree_root);
2201
2202         free_root_extent_buffers(info->dev_root);
2203         free_root_extent_buffers(info->extent_root);
2204         free_root_extent_buffers(info->csum_root);
2205         free_root_extent_buffers(info->quota_root);
2206         free_root_extent_buffers(info->uuid_root);
2207         if (chunk_root)
2208                 free_root_extent_buffers(info->chunk_root);
2209         free_root_extent_buffers(info->free_space_root);
2210 }
2211
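/*
 * Drop every fs root: first the dead_roots list, then whatever is left in
 * the radix tree.  On an errored filesystem this also frees the log root
 * tree and destroys the pinned extents.
 */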
2212 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2213 {
2214         int ret;
2215         struct btrfs_root *gang[8];
2216         int i;
2217
2218         while (!list_empty(&fs_info->dead_roots)) {
2219                 gang[0] = list_entry(fs_info->dead_roots.next,
2220                                      struct btrfs_root, root_list);
2221                 list_del(&gang[0]->root_list);
2222
2223                 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2224                         btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2225                 } else {
2226                         free_extent_buffer(gang[0]->node);
2227                         free_extent_buffer(gang[0]->commit_root);
2228                         btrfs_put_fs_root(gang[0]);
2229                 }
2230         }
2231
2232         while (1) {
2233                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2234                                              (void **)gang, 0,
2235                                              ARRAY_SIZE(gang));
2236                 if (!ret)
2237                         break;
2238                 for (i = 0; i < ret; i++)
2239                         btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2240         }
2241
2242         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2243                 btrfs_free_log_root_tree(NULL, fs_info);
2244                 btrfs_destroy_pinned_extent(fs_info->tree_root,
2245                                             fs_info->pinned_extents);
2246         }
2247 }
2248
2249 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2250 {
2251         mutex_init(&fs_info->scrub_lock);
2252         atomic_set(&fs_info->scrubs_running, 0);
2253         atomic_set(&fs_info->scrub_pause_req, 0);
2254         atomic_set(&fs_info->scrubs_paused, 0);
2255         atomic_set(&fs_info->scrub_cancel_req, 0);
2256         init_waitqueue_head(&fs_info->scrub_pause_wait);
2257         fs_info->scrub_workers_refcnt = 0;
2258 }
2259
2260 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2261 {
2262         spin_lock_init(&fs_info->balance_lock);
2263         mutex_init(&fs_info->balance_mutex);
2264         atomic_set(&fs_info->balance_running, 0);
2265         atomic_set(&fs_info->balance_pause_req, 0);
2266         atomic_set(&fs_info->balance_cancel_req, 0);
2267         fs_info->balance_ctl = NULL;
2268         init_waitqueue_head(&fs_info->balance_wait_q);
2269 }
2270
2271 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
2272                                    struct btrfs_root *tree_root)
2273 {
2274         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2275         set_nlink(fs_info->btree_inode, 1);
2276         /*
2277          * we set the i_size on the btree inode to the max possible offset.
2278          * the real end of the address space is determined by all of
2279          * the devices in the system
2280          */
2281         fs_info->btree_inode->i_size = OFFSET_MAX;
2282         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2283
2284         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2285         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2286                              fs_info->btree_inode->i_mapping);
2287         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2288         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2289
2290         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2291
2292         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2293         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2294                sizeof(struct btrfs_key));
2295         set_bit(BTRFS_INODE_DUMMY,
2296                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2297         btrfs_insert_inode_hash(fs_info->btree_inode);
2298 }
2299
2300 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2301 {
2302         fs_info->dev_replace.lock_owner = 0;
2303         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2304         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2305         rwlock_init(&fs_info->dev_replace.lock);
2306         atomic_set(&fs_info->dev_replace.read_locks, 0);
2307         atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2308         init_waitqueue_head(&fs_info->replace_wait);
2309         init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2310 }
2311
2312 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2313 {
2314         spin_lock_init(&fs_info->qgroup_lock);
2315         mutex_init(&fs_info->qgroup_ioctl_lock);
2316         fs_info->qgroup_tree = RB_ROOT;
2317         fs_info->qgroup_op_tree = RB_ROOT;
2318         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2319         fs_info->qgroup_seq = 1;
2320         fs_info->quota_enabled = 0;
2321         fs_info->pending_quota_state = 0;
2322         fs_info->qgroup_ulist = NULL;
2323         fs_info->qgroup_rescan_running = false;
2324         mutex_init(&fs_info->qgroup_rescan_lock);
2325 }
2326
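/*
 * Allocate all of the btrfs workqueues, sized from thread_pool_size.
 * Returns -ENOMEM if any of the allocations failed.
 */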
2327 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2328                 struct btrfs_fs_devices *fs_devices)
2329 {
2330         int max_active = fs_info->thread_pool_size;
2331         unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2332
2333         fs_info->workers =
2334                 btrfs_alloc_workqueue(fs_info, "worker",
2335                                       flags | WQ_HIGHPRI, max_active, 16);
2336
2337         fs_info->delalloc_workers =
2338                 btrfs_alloc_workqueue(fs_info, "delalloc",
2339                                       flags, max_active, 2);
2340
2341         fs_info->flush_workers =
2342                 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2343                                       flags, max_active, 0);
2344
2345         fs_info->caching_workers =
2346                 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2347
2348         /*
2349          * a higher idle thresh on the submit workers makes it much more
2350          * likely that bios will be sent down in a sane order to the
2351          * devices
2352          */
2353         fs_info->submit_workers =
2354                 btrfs_alloc_workqueue(fs_info, "submit", flags,
2355                                       min_t(u64, fs_devices->num_devices,
2356                                             max_active), 64);
2357
2358         fs_info->fixup_workers =
2359                 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2360
2361         /*
2362          * endios are largely parallel and should have a very
2363          * low idle thresh
2364          */
2365         fs_info->endio_workers =
2366                 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2367         fs_info->endio_meta_workers =
2368                 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2369                                       max_active, 4);
2370         fs_info->endio_meta_write_workers =
2371                 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2372                                       max_active, 2);
2373         fs_info->endio_raid56_workers =
2374                 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2375                                       max_active, 4);
2376         fs_info->endio_repair_workers =
2377                 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2378         fs_info->rmw_workers =
2379                 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2380         fs_info->endio_write_workers =
2381                 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2382                                       max_active, 2);
2383         fs_info->endio_freespace_worker =
2384                 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2385                                       max_active, 0);
2386         fs_info->delayed_workers =
2387                 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2388                                       max_active, 0);
2389         fs_info->readahead_workers =
2390                 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2391                                       max_active, 2);
2392         fs_info->qgroup_rescan_workers =
2393                 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2394         fs_info->extent_workers =
2395                 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2396                                       min_t(u64, fs_devices->num_devices,
2397                                             max_active), 8);
2398
2399         if (!(fs_info->workers && fs_info->delalloc_workers &&
2400               fs_info->submit_workers && fs_info->flush_workers &&
2401               fs_info->endio_workers && fs_info->endio_meta_workers &&
2402               fs_info->endio_meta_write_workers &&
2403               fs_info->endio_repair_workers &&
2404               fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2405               fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2406               fs_info->caching_workers && fs_info->readahead_workers &&
2407               fs_info->fixup_workers && fs_info->delayed_workers &&
2408               fs_info->extent_workers &&
2409               fs_info->qgroup_rescan_workers)) {
2410                 return -ENOMEM;
2411         }
2412
2413         return 0;
2414 }
2415
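/*
 * Read the log tree recorded in the super block and replay it.  Replay
 * needs writable devices; on a read-only mount the super block is
 * committed once recovery finishes.
 */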
2416 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2417                             struct btrfs_fs_devices *fs_devices)
2418 {
2419         int ret;
2420         struct btrfs_root *tree_root = fs_info->tree_root;
2421         struct btrfs_root *log_tree_root;
2422         struct btrfs_super_block *disk_super = fs_info->super_copy;
2423         u64 bytenr = btrfs_super_log_root(disk_super);
2424
2425         if (fs_devices->rw_devices == 0) {
2426                 btrfs_warn(fs_info, "log replay required on RO media");
2427                 return -EIO;
2428         }
2429
2430         log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2431         if (!log_tree_root)
2432                 return -ENOMEM;
2433
2434         __setup_root(tree_root->nodesize, tree_root->sectorsize,
2435                         tree_root->stripesize, log_tree_root, fs_info,
2436                         BTRFS_TREE_LOG_OBJECTID);
2437
2438         log_tree_root->node = read_tree_block(tree_root, bytenr,
2439                         fs_info->generation + 1);
2440         if (IS_ERR(log_tree_root->node)) {
2441                 btrfs_warn(fs_info, "failed to read log tree");
2442                 ret = PTR_ERR(log_tree_root->node);
2443                 kfree(log_tree_root);
2444                 return ret;
2445         } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2446                 btrfs_err(fs_info, "failed to read log tree");
2447                 free_extent_buffer(log_tree_root->node);
2448                 kfree(log_tree_root);
2449                 return -EIO;
2450         }
2451         /* returns with log_tree_root freed on success */
2452         ret = btrfs_recover_log_trees(log_tree_root);
2453         if (ret) {
2454                 btrfs_handle_fs_error(tree_root->fs_info, ret,
2455                             "Failed to recover log tree");
2456                 free_extent_buffer(log_tree_root->node);
2457                 kfree(log_tree_root);
2458                 return ret;
2459         }
2460
2461         if (fs_info->sb->s_flags & MS_RDONLY) {
2462                 ret = btrfs_commit_super(tree_root);
2463                 if (ret)
2464                         return ret;
2465         }
2466
2467         return 0;
2468 }
2469
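/*
 * Read the remaining persistent trees referenced from the tree root
 * (extent, dev, csum, and optionally quota, uuid and free space trees)
 * and hang them off fs_info.
 */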
2470 static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
2471                             struct btrfs_root *tree_root)
2472 {
2473         struct btrfs_root *root;
2474         struct btrfs_key location;
2475         int ret;
2476
2477         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2478         location.type = BTRFS_ROOT_ITEM_KEY;
2479         location.offset = 0;
2480
2481         root = btrfs_read_tree_root(tree_root, &location);
2482         if (IS_ERR(root))
2483                 return PTR_ERR(root);
2484         set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2485         fs_info->extent_root = root;
2486
2487         location.objectid = BTRFS_DEV_TREE_OBJECTID;
2488         root = btrfs_read_tree_root(tree_root, &location);
2489         if (IS_ERR(root))
2490                 return PTR_ERR(root);
2491         set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2492         fs_info->dev_root = root;
2493         btrfs_init_devices_late(fs_info);
2494
2495         location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2496         root = btrfs_read_tree_root(tree_root, &location);
2497         if (IS_ERR(root))
2498                 return PTR_ERR(root);
2499         set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2500         fs_info->csum_root = root;
2501
2502         location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2503         root = btrfs_read_tree_root(tree_root, &location);
2504         if (!IS_ERR(root)) {
2505                 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2506                 fs_info->quota_enabled = 1;
2507                 fs_info->pending_quota_state = 1;
2508                 fs_info->quota_root = root;
2509         }
2510
2511         location.objectid = BTRFS_UUID_TREE_OBJECTID;
2512         root = btrfs_read_tree_root(tree_root, &location);
2513         if (IS_ERR(root)) {
2514                 ret = PTR_ERR(root);
2515                 if (ret != -ENOENT)
2516                         return ret;
2517         } else {
2518                 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2519                 fs_info->uuid_root = root;
2520         }
2521
2522         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2523                 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2524                 root = btrfs_read_tree_root(tree_root, &location);
2525                 if (IS_ERR(root))
2526                         return PTR_ERR(root);
2527                 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2528                 fs_info->free_space_root = root;
2529         }
2530
2531         return 0;
2532 }
2533
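/*
 * Mount-time entry point: sets up the in-memory fs_info state, reads and
 * validates the super block, and then brings the filesystem up using the
 * helpers above (workqueues, log replay, reading the tree roots).
 */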
2534 int open_ctree(struct super_block *sb,
2535                struct btrfs_fs_devices *fs_devices,
2536                char *options)
2537 {
2538         u32 sectorsize;
2539         u32 nodesize;
2540         u32 stripesize;
2541         u64 generation;
2542         u64 features;
2543         struct btrfs_key location;
2544         struct buffer_head *bh;
2545         struct btrfs_super_block *disk_super;
2546         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2547         struct btrfs_root *tree_root;
2548         struct btrfs_root *chunk_root;
2549         int ret;
2550         int err = -EINVAL;
2551         int num_backups_tried = 0;
2552         int backup_index = 0;
2553         int max_active;
2554
2555         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2556         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2557         if (!tree_root || !chunk_root) {
2558                 err = -ENOMEM;
2559                 goto fail;
2560         }
2561
2562         ret = init_srcu_struct(&fs_info->subvol_srcu);
2563         if (ret) {
2564                 err = ret;
2565                 goto fail;
2566         }
2567
2568         ret = setup_bdi(fs_info, &fs_info->bdi);
2569         if (ret) {
2570                 err = ret;
2571                 goto fail_srcu;
2572         }
2573
2574         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2575         if (ret) {
2576                 err = ret;
2577                 goto fail_bdi;
2578         }
2579         fs_info->dirty_metadata_batch = PAGE_SIZE *
2580                                         (1 + ilog2(nr_cpu_ids));
2581
2582         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2583         if (ret) {
2584                 err = ret;
2585                 goto fail_dirty_metadata_bytes;
2586         }
2587
2588         ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2589         if (ret) {
2590                 err = ret;
2591                 goto fail_delalloc_bytes;
2592         }
2593
2594         fs_info->btree_inode = new_inode(sb);
2595         if (!fs_info->btree_inode) {
2596                 err = -ENOMEM;
2597                 goto fail_bio_counter;
2598         }
2599
2600         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2601
2602         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2603         INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2604         INIT_LIST_HEAD(&fs_info->trans_list);
2605         INIT_LIST_HEAD(&fs_info->dead_roots);
2606         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2607         INIT_LIST_HEAD(&fs_info->delalloc_roots);
2608         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2609         spin_lock_init(&fs_info->delalloc_root_lock);
2610         spin_lock_init(&fs_info->trans_lock);
2611         spin_lock_init(&fs_info->fs_roots_radix_lock);
2612         spin_lock_init(&fs_info->delayed_iput_lock);
2613         spin_lock_init(&fs_info->defrag_inodes_lock);
2614         spin_lock_init(&fs_info->free_chunk_lock);
2615         spin_lock_init(&fs_info->tree_mod_seq_lock);
2616         spin_lock_init(&fs_info->super_lock);
2617         spin_lock_init(&fs_info->qgroup_op_lock);
2618         spin_lock_init(&fs_info->buffer_lock);
2619         spin_lock_init(&fs_info->unused_bgs_lock);
2620         rwlock_init(&fs_info->tree_mod_log_lock);
2621         mutex_init(&fs_info->unused_bg_unpin_mutex);
2622         mutex_init(&fs_info->delete_unused_bgs_mutex);
2623         mutex_init(&fs_info->reloc_mutex);
2624         mutex_init(&fs_info->delalloc_root_mutex);
2625         mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2626         seqlock_init(&fs_info->profiles_lock);
2627
2628         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2629         INIT_LIST_HEAD(&fs_info->space_info);
2630         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2631         INIT_LIST_HEAD(&fs_info->unused_bgs);
2632         btrfs_mapping_init(&fs_info->mapping_tree);
2633         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2634                              BTRFS_BLOCK_RSV_GLOBAL);
2635         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2636                              BTRFS_BLOCK_RSV_DELALLOC);
2637         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2638         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2639         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2640         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2641                              BTRFS_BLOCK_RSV_DELOPS);
2642         atomic_set(&fs_info->nr_async_submits, 0);
2643         atomic_set(&fs_info->async_delalloc_pages, 0);
2644         atomic_set(&fs_info->async_submit_draining, 0);
2645         atomic_set(&fs_info->nr_async_bios, 0);
2646         atomic_set(&fs_info->defrag_running, 0);
2647         atomic_set(&fs_info->qgroup_op_seq, 0);
2648         atomic_set(&fs_info->reada_works_cnt, 0);
2649         atomic64_set(&fs_info->tree_mod_seq, 0);
2650         fs_info->fs_frozen = 0;
2651         fs_info->sb = sb;
2652         fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2653         fs_info->metadata_ratio = 0;
2654         fs_info->defrag_inodes = RB_ROOT;
2655         fs_info->free_chunk_space = 0;
2656         fs_info->tree_mod_log = RB_ROOT;
2657         fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2658         fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2659         /* readahead state */
2660         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2661         spin_lock_init(&fs_info->reada_lock);
2662
2663         fs_info->thread_pool_size = min_t(unsigned long,
2664                                           num_online_cpus() + 2, 8);
2665
2666         INIT_LIST_HEAD(&fs_info->ordered_roots);
2667         spin_lock_init(&fs_info->ordered_root_lock);
2668         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2669                                         GFP_KERNEL);
2670         if (!fs_info->delayed_root) {
2671                 err = -ENOMEM;
2672                 goto fail_iput;
2673         }
2674         btrfs_init_delayed_root(fs_info->delayed_root);
2675
2676         btrfs_init_scrub(fs_info);
2677 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2678         fs_info->check_integrity_print_mask = 0;
2679 #endif
2680         btrfs_init_balance(fs_info);
2681         btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2682
2683         sb->s_blocksize = 4096;
2684         sb->s_blocksize_bits = blksize_bits(4096);
2685         sb->s_bdi = &fs_info->bdi;
2686
2687         btrfs_init_btree_inode(fs_info, tree_root);
2688
2689         spin_lock_init(&fs_info->block_group_cache_lock);
2690         fs_info->block_group_cache_tree = RB_ROOT;
2691         fs_info->first_logical_byte = (u64)-1;
2692
2693         extent_io_tree_init(&fs_info->freed_extents[0],
2694                              fs_info->btree_inode->i_mapping);
2695         extent_io_tree_init(&fs_info->freed_extents[1],
2696                              fs_info->btree_inode->i_mapping);
2697         fs_info->pinned_extents = &fs_info->freed_extents[0];
2698         fs_info->do_barriers = 1;
2699
2700
2701         mutex_init(&fs_info->ordered_operations_mutex);
2702         mutex_init(&fs_info->tree_log_mutex);
2703         mutex_init(&fs_info->chunk_mutex);
2704         mutex_init(&fs_info->transaction_kthread_mutex);
2705         mutex_init(&fs_info->cleaner_mutex);
2706         mutex_init(&fs_info->volume_mutex);
2707         mutex_init(&fs_info->ro_block_group_mutex);
2708         init_rwsem(&fs_info->commit_root_sem);
2709         init_rwsem(&fs_info->cleanup_work_sem);
2710         init_rwsem(&fs_info->subvol_sem);
2711         sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2712
2713         btrfs_init_dev_replace_locks(fs_info);
2714         btrfs_init_qgroup(fs_info);
2715
2716         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2717         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2718
2719         init_waitqueue_head(&fs_info->transaction_throttle);
2720         init_waitqueue_head(&fs_info->transaction_wait);
2721         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2722         init_waitqueue_head(&fs_info->async_submit_wait);
2723
2724         INIT_LIST_HEAD(&fs_info->pinned_chunks);
2725
2726         ret = btrfs_alloc_stripe_hash_table(fs_info);
2727         if (ret) {
2728                 err = ret;
2729                 goto fail_alloc;
2730         }
2731
2732         __setup_root(4096, 4096, 4096, tree_root,
2733                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2734
2735         invalidate_bdev(fs_devices->latest_bdev);
2736
2737         /*
2738          * Read super block and check the signature bytes only
2739          */
2740         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2741         if (IS_ERR(bh)) {
2742                 err = PTR_ERR(bh);
2743                 goto fail_alloc;
2744         }
2745
2746         /*
2747          * We want to check the superblock checksum; the csum type is stored inside.
2748          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2749          */
2750         if (btrfs_check_super_csum(bh->b_data)) {
2751                 btrfs_err(fs_info, "superblock checksum mismatch");
2752                 err = -EINVAL;
2753                 brelse(bh);
2754                 goto fail_alloc;
2755         }
2756
2757         /*
2758          * super_copy is zeroed at allocation time and we never touch the
2759          * following bytes up to INFO_SIZE; the checksum is calculated from
2760          * the whole block of INFO_SIZE
2761          */
2762         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2763         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2764                sizeof(*fs_info->super_for_commit));
2765         brelse(bh);
2766
2767         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2768
2769         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2770         if (ret) {
2771                 btrfs_err(fs_info, "superblock contains fatal errors");
2772                 err = -EINVAL;
2773                 goto fail_alloc;
2774         }
2775
2776         disk_super = fs_info->super_copy;
2777         if (!btrfs_super_root(disk_super))
2778                 goto fail_alloc;
2779
2780         /* check FS state, whether FS is broken. */
2781         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2782                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2783
2784         /*
2785          * run through our array of backup supers and set up
2786          * our ring pointer to the oldest one
2787          */
2788         generation = btrfs_super_generation(disk_super);
2789         find_oldest_super_backup(fs_info, generation);
2790
2791         /*
2792          * In the long term, we'll store the compression type in the super
2793          * block, and it'll be used for per file compression control.
2794          * block, and it'll be used for per-file compression control.
2795         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2796
2797         ret = btrfs_parse_options(tree_root, options, sb->s_flags);
2798         if (ret) {
2799                 err = ret;
2800                 goto fail_alloc;
2801         }
2802
2803         features = btrfs_super_incompat_flags(disk_super) &
2804                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2805         if (features) {
2806                 btrfs_err(fs_info,
2807                     "cannot mount because of unsupported optional features (%llx)",
2808                     features);
2809                 err = -EINVAL;
2810                 goto fail_alloc;
2811         }
2812
2813         features = btrfs_super_incompat_flags(disk_super);
2814         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2815         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2816                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2817
2818         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2819                 btrfs_info(fs_info, "has skinny extents");
2820
2821         /*
2822          * flag our filesystem as having big metadata blocks if
2823          * they are bigger than the page size
2824          */
2825         if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2826                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2827                         btrfs_info(fs_info,
2828                                 "flagging fs with big metadata feature");
2829                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2830         }
2831
2832         nodesize = btrfs_super_nodesize(disk_super);
2833         sectorsize = btrfs_super_sectorsize(disk_super);
2834         stripesize = sectorsize;
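        /*
         * The batch values below are used as the batch argument for per-cpu
         * counter updates (dirty_metadata_batch feeds __percpu_counter_add()
         * for dirty metadata accounting; delalloc_batch is presumably used
         * the same way for delalloc bytes).  Scaling them with
         * ilog2(nr_cpu_ids) keeps counter updates cheap on machines with
         * many CPUs at the cost of a less precise global value.
         */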
2835         fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2836         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2837
2838         /*
2839          * mixed block groups end up with duplicate but slightly offset
2840          * extent buffers for the same range, which leads to corruption.
2841          */
2842         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2843             (sectorsize != nodesize)) {
2844                 btrfs_err(fs_info,
2845 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2846                         nodesize, sectorsize);
2847                 goto fail_alloc;
2848         }
2849
2850         /*
2851          * No need to take the lock here because no other task can be
2852          * updating the flag at this point.
2853          */
2854         btrfs_set_super_incompat_flags(disk_super, features);
2855
2856         features = btrfs_super_compat_ro_flags(disk_super) &
2857                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2858         if (!(sb->s_flags & MS_RDONLY) && features) {
2859                 btrfs_err(fs_info,
2860         "cannot mount read-write because of unsupported optional features (%llx)",
2861                        features);
2862                 err = -EINVAL;
2863                 goto fail_alloc;
2864         }
2865
2866         max_active = fs_info->thread_pool_size;
2867
2868         ret = btrfs_init_workqueues(fs_info, fs_devices);
2869         if (ret) {
2870                 err = ret;
2871                 goto fail_sb_buffer;
2872         }
2873
2874         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2875         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2876                                     SZ_4M / PAGE_SIZE);
2877
2878         tree_root->nodesize = nodesize;
2879         tree_root->sectorsize = sectorsize;
2880         tree_root->stripesize = stripesize;
2881
2882         sb->s_blocksize = sectorsize;
2883         sb->s_blocksize_bits = blksize_bits(sectorsize);
2884
2885         mutex_lock(&fs_info->chunk_mutex);
2886         ret = btrfs_read_sys_array(tree_root);
2887         mutex_unlock(&fs_info->chunk_mutex);
2888         if (ret) {
2889                 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2890                 goto fail_sb_buffer;
2891         }
2892
2893         generation = btrfs_super_chunk_root_generation(disk_super);
2894
2895         __setup_root(nodesize, sectorsize, stripesize, chunk_root,
2896                      fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2897
2898         chunk_root->node = read_tree_block(chunk_root,
2899                                            btrfs_super_chunk_root(disk_super),
2900                                            generation);
2901         if (IS_ERR(chunk_root->node) ||
2902             !extent_buffer_uptodate(chunk_root->node)) {
2903                 btrfs_err(fs_info, "failed to read chunk root");
2904                 if (!IS_ERR(chunk_root->node))
2905                         free_extent_buffer(chunk_root->node);
2906                 chunk_root->node = NULL;
2907                 goto fail_tree_roots;
2908         }
2909         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2910         chunk_root->commit_root = btrfs_root_node(chunk_root);
2911
2912         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2913            btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2914
2915         ret = btrfs_read_chunk_tree(chunk_root);
2916         if (ret) {
2917                 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2918                 goto fail_tree_roots;
2919         }
2920
2921         /*
2922          * keep the device that is marked to be the target device for the
2923          * dev_replace procedure
2924          */
2925         btrfs_close_extra_devices(fs_devices, 0);
2926
2927         if (!fs_devices->latest_bdev) {
2928                 btrfs_err(fs_info, "failed to read devices");
2929                 goto fail_tree_roots;
2930         }
2931
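        /*
         * Read the tree root.  If this (or one of the later root reads)
         * fails and the usebackuproot mount option is set, recovery_tree_root
         * below rolls super_copy back to the next backup root and jumps
         * back here to retry.
         */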
2932 retry_root_backup:
2933         generation = btrfs_super_generation(disk_super);
2934
2935         tree_root->node = read_tree_block(tree_root,
2936                                           btrfs_super_root(disk_super),
2937                                           generation);
2938         if (IS_ERR(tree_root->node) ||
2939             !extent_buffer_uptodate(tree_root->node)) {
2940                 btrfs_warn(fs_info, "failed to read tree root");
2941                 if (!IS_ERR(tree_root->node))
2942                         free_extent_buffer(tree_root->node);
2943                 tree_root->node = NULL;
2944                 goto recovery_tree_root;
2945         }
2946
2947         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2948         tree_root->commit_root = btrfs_root_node(tree_root);
2949         btrfs_set_root_refs(&tree_root->root_item, 1);
2950
2951         mutex_lock(&tree_root->objectid_mutex);
2952         ret = btrfs_find_highest_objectid(tree_root,
2953                                         &tree_root->highest_objectid);
2954         if (ret) {
2955                 mutex_unlock(&tree_root->objectid_mutex);
2956                 goto recovery_tree_root;
2957         }
2958
2959         ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2960
2961         mutex_unlock(&tree_root->objectid_mutex);
2962
2963         ret = btrfs_read_roots(fs_info, tree_root);
2964         if (ret)
2965                 goto recovery_tree_root;
2966
2967         fs_info->generation = generation;
2968         fs_info->last_trans_committed = generation;
2969
2970         ret = btrfs_recover_balance(fs_info);
2971         if (ret) {
2972                 btrfs_err(fs_info, "failed to recover balance: %d", ret);
2973                 goto fail_block_groups;
2974         }
2975
2976         ret = btrfs_init_dev_stats(fs_info);
2977         if (ret) {
2978                 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2979                 goto fail_block_groups;
2980         }
2981
2982         ret = btrfs_init_dev_replace(fs_info);
2983         if (ret) {
2984                 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
2985                 goto fail_block_groups;
2986         }
2987
2988         btrfs_close_extra_devices(fs_devices, 1);
2989
2990         ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2991         if (ret) {
2992                 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
2993                                 ret);
2994                 goto fail_block_groups;
2995         }
2996
2997         ret = btrfs_sysfs_add_device(fs_devices);
2998         if (ret) {
2999                 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3000                                 ret);
3001                 goto fail_fsdev_sysfs;
3002         }
3003
3004         ret = btrfs_sysfs_add_mounted(fs_info);
3005         if (ret) {
3006                 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3007                 goto fail_fsdev_sysfs;
3008         }
3009
3010         ret = btrfs_init_space_info(fs_info);
3011         if (ret) {
3012                 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3013                 goto fail_sysfs;
3014         }
3015
3016         ret = btrfs_read_block_groups(fs_info->extent_root);
3017         if (ret) {
3018                 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3019                 goto fail_sysfs;
3020         }
3021         fs_info->num_tolerated_disk_barrier_failures =
3022                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3023         if (fs_info->fs_devices->missing_devices >
3024              fs_info->num_tolerated_disk_barrier_failures &&
3025             !(sb->s_flags & MS_RDONLY)) {
3026                 btrfs_warn(fs_info,
3027 "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
3028                         fs_info->fs_devices->missing_devices,
3029                         fs_info->num_tolerated_disk_barrier_failures);
3030                 goto fail_sysfs;
3031         }
3032
3033         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3034                                                "btrfs-cleaner");
3035         if (IS_ERR(fs_info->cleaner_kthread))
3036                 goto fail_sysfs;
3037
3038         fs_info->transaction_kthread = kthread_run(transaction_kthread,
3039                                                    tree_root,
3040                                                    "btrfs-transaction");
3041         if (IS_ERR(fs_info->transaction_kthread))
3042                 goto fail_cleaner;
3043
3044         if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
3045             !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
3046             !fs_info->fs_devices->rotating) {
3047                 btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
3048                 btrfs_set_opt(fs_info->mount_opt, SSD);
3049         }
3050
3051         /*
3052          * Mount does not set all options immediately; we can apply them now
3053          * without waiting for a transaction commit.
3054          */
3055         btrfs_apply_pending_changes(fs_info);
3056
3057 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3058         if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
3059                 ret = btrfsic_mount(tree_root, fs_devices,
3060                                     btrfs_test_opt(tree_root->fs_info,
3061                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3062                                     1 : 0,
3063                                     fs_info->check_integrity_print_mask);
3064                 if (ret)
3065                         btrfs_warn(fs_info,
3066                                 "failed to initialize integrity check module: %d",
3067                                 ret);
3068         }
3069 #endif
3070         ret = btrfs_read_qgroup_config(fs_info);
3071         if (ret)
3072                 goto fail_trans_kthread;
3073
3074         /* Do not make disk changes in a broken FS or when nologreplay is given. */
3075         if (btrfs_super_log_root(disk_super) != 0 &&
3076             !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
3077                 ret = btrfs_replay_log(fs_info, fs_devices);
3078                 if (ret) {
3079                         err = ret;
3080                         goto fail_qgroup;
3081                 }
3082         }
3083
3084         ret = btrfs_find_orphan_roots(tree_root);
3085         if (ret)
3086                 goto fail_qgroup;
3087
3088         if (!(sb->s_flags & MS_RDONLY)) {
3089                 ret = btrfs_cleanup_fs_roots(fs_info);
3090                 if (ret)
3091                         goto fail_qgroup;
3092
3093                 mutex_lock(&fs_info->cleaner_mutex);
3094                 ret = btrfs_recover_relocation(tree_root);
3095                 mutex_unlock(&fs_info->cleaner_mutex);
3096                 if (ret < 0) {
3097                         btrfs_warn(fs_info, "failed to recover relocation: %d",
3098                                         ret);
3099                         err = -EINVAL;
3100                         goto fail_qgroup;
3101                 }
3102         }
3103
3104         location.objectid = BTRFS_FS_TREE_OBJECTID;
3105         location.type = BTRFS_ROOT_ITEM_KEY;
3106         location.offset = 0;
3107
3108         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3109         if (IS_ERR(fs_info->fs_root)) {
3110                 err = PTR_ERR(fs_info->fs_root);
3111                 goto fail_qgroup;
3112         }
3113
3114         if (sb->s_flags & MS_RDONLY)
3115                 return 0;
3116
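        /*
         * Everything from this point on only applies to read-write mounts;
         * read-only mounts returned above.
         */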
3117         if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
3118             !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3119                 btrfs_info(fs_info, "creating free space tree");
3120                 ret = btrfs_create_free_space_tree(fs_info);
3121                 if (ret) {
3122                         btrfs_warn(fs_info,
3123                                 "failed to create free space tree: %d", ret);
3124                         close_ctree(tree_root);
3125                         return ret;
3126                 }
3127         }
3128
3129         down_read(&fs_info->cleanup_work_sem);
3130         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3131             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3132                 up_read(&fs_info->cleanup_work_sem);
3133                 close_ctree(tree_root);
3134                 return ret;
3135         }
3136         up_read(&fs_info->cleanup_work_sem);
3137
3138         ret = btrfs_resume_balance_async(fs_info);
3139         if (ret) {
3140                 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3141                 close_ctree(tree_root);
3142                 return ret;
3143         }
3144
3145         ret = btrfs_resume_dev_replace_async(fs_info);
3146         if (ret) {
3147                 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3148                 close_ctree(tree_root);
3149                 return ret;
3150         }
3151
3152         btrfs_qgroup_rescan_resume(fs_info);
3153
3154         if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
3155             btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3156                 btrfs_info(fs_info, "clearing free space tree");
3157                 ret = btrfs_clear_free_space_tree(fs_info);
3158                 if (ret) {
3159                         btrfs_warn(fs_info,
3160                                 "failed to clear free space tree: %d", ret);
3161                         close_ctree(tree_root);
3162                         return ret;
3163                 }
3164         }
3165
3166         if (!fs_info->uuid_root) {
3167                 btrfs_info(fs_info, "creating UUID tree");
3168                 ret = btrfs_create_uuid_tree(fs_info);
3169                 if (ret) {
3170                         btrfs_warn(fs_info,
3171                                 "failed to create the UUID tree: %d", ret);
3172                         close_ctree(tree_root);
3173                         return ret;
3174                 }
3175         } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
3176                    fs_info->generation !=
3177                                 btrfs_super_uuid_tree_generation(disk_super)) {
3178                 btrfs_info(fs_info, "checking UUID tree");
3179                 ret = btrfs_check_uuid_tree(fs_info);
3180                 if (ret) {
3181                         btrfs_warn(fs_info,
3182                                 "failed to check the UUID tree: %d", ret);
3183                         close_ctree(tree_root);
3184                         return ret;
3185                 }
3186         } else {
3187                 fs_info->update_uuid_tree_gen = 1;
3188         }
3189
3190         fs_info->open = 1;
3191
3192         /*
3193          * backuproot only affects mount behavior; if open_ctree succeeded,
3194          * there is no need to keep the flag.
3195          */
3196         btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3197
3198         return 0;
3199
3200 fail_qgroup:
3201         btrfs_free_qgroup_config(fs_info);
3202 fail_trans_kthread:
3203         kthread_stop(fs_info->transaction_kthread);
3204         btrfs_cleanup_transaction(fs_info->tree_root);
3205         btrfs_free_fs_roots(fs_info);
3206 fail_cleaner:
3207         kthread_stop(fs_info->cleaner_kthread);
3208
3209         /*
3210          * make sure we're done with the btree inode before we stop our
3211          * kthreads
3212          */
3213         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3214
3215 fail_sysfs:
3216         btrfs_sysfs_remove_mounted(fs_info);
3217
3218 fail_fsdev_sysfs:
3219         btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3220
3221 fail_block_groups:
3222         btrfs_put_block_group_cache(fs_info);
3223         btrfs_free_block_groups(fs_info);
3224
3225 fail_tree_roots:
3226         free_root_pointers(fs_info, 1);
3227         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3228
3229 fail_sb_buffer:
3230         btrfs_stop_all_workers(fs_info);
3231 fail_alloc:
3232 fail_iput:
3233         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3234
3235         iput(fs_info->btree_inode);
3236 fail_bio_counter:
3237         percpu_counter_destroy(&fs_info->bio_counter);
3238 fail_delalloc_bytes:
3239         percpu_counter_destroy(&fs_info->delalloc_bytes);
3240 fail_dirty_metadata_bytes:
3241         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3242 fail_bdi:
3243         bdi_destroy(&fs_info->bdi);
3244 fail_srcu:
3245         cleanup_srcu_struct(&fs_info->subvol_srcu);
3246 fail:
3247         btrfs_free_stripe_hash_table(fs_info);
3248         btrfs_close_devices(fs_info->fs_devices);
3249         return err;
3250
3251 recovery_tree_root:
3252         if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
3253                 goto fail_tree_roots;
3254
3255         free_root_pointers(fs_info, 0);
3256
3257         /* don't use the log in recovery mode, it won't be valid */
3258         btrfs_set_super_log_root(disk_super, 0);
3259
3260         /* we can't trust the free space cache either */
3261         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3262
3263         ret = next_root_backup(fs_info, fs_info->super_copy,
3264                                &num_backups_tried, &backup_index);
3265         if (ret == -1)
3266                 goto fail_block_groups;
3267         goto retry_root_backup;
3268 }
3269
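/*
 * Completion callback for the superblock buffer heads submitted by
 * write_dev_supers().  On error the buffer is left !uptodate and the
 * write error is accounted in the device statistics.
 */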
3270 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3271 {
3272         if (uptodate) {
3273                 set_buffer_uptodate(bh);
3274         } else {
3275                 struct btrfs_device *device = (struct btrfs_device *)
3276                         bh->b_private;
3277
3278                 btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
3279                                 "lost page write due to IO error on %s",
3280                                           rcu_str_deref(device->name));
3281                 /* Note: we don't call set_buffer_write_io_error() because
3282                  * we have our own way of dealing with the IO errors.
3283                  */
3284                 clear_buffer_uptodate(bh);
3285                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3286         }
3287         unlock_buffer(bh);
3288         put_bh(bh);
3289 }
3290
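/*
 * Read a single superblock copy (copy_num) from the device and do basic
 * validation: the stored bytenr must match the expected offset and the
 * magic must be BTRFS_MAGIC.  On success the buffer head is returned via
 * bh_ret and the caller is responsible for releasing it.
 */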
3291 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3292                         struct buffer_head **bh_ret)
3293 {
3294         struct buffer_head *bh;
3295         struct btrfs_super_block *super;
3296         u64 bytenr;
3297
3298         bytenr = btrfs_sb_offset(copy_num);
3299         if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3300                 return -EINVAL;
3301
3302         bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
3303         /*
3304          * If we fail to read from the underlying devices, as of now
3305          * the best option we have is to mark it EIO.
3306          */
3307         if (!bh)
3308                 return -EIO;
3309
3310         super = (struct btrfs_super_block *)bh->b_data;
3311         if (btrfs_super_bytenr(super) != bytenr ||
3312                     btrfs_super_magic(super) != BTRFS_MAGIC) {
3313                 brelse(bh);
3314                 return -EINVAL;
3315         }
3316
3317         *bh_ret = bh;
3318         return 0;
3319 }
3320
3321
3322 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3323 {
3324         struct buffer_head *bh;
3325         struct buffer_head *latest = NULL;
3326         struct btrfs_super_block *super;
3327         int i;
3328         u64 transid = 0;
3329         int ret = -EINVAL;
3330
3331         /* we would like to check all the supers, but that would make
3332          * a btrfs mount succeed after a mkfs from a different FS.
3333          * So, we need to add a special mount option to scan for
3334          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3335          */
3336         for (i = 0; i < 1; i++) {
3337                 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3338                 if (ret)
3339                         continue;
3340
3341                 super = (struct btrfs_super_block *)bh->b_data;
3342
3343                 if (!latest || btrfs_super_generation(super) > transid) {
3344                         brelse(latest);
3345                         latest = bh;
3346                         transid = btrfs_super_generation(super);
3347                 } else {
3348                         brelse(bh);
3349                 }
3350         }
3351
3352         if (!latest)
3353                 return ERR_PTR(ret);
3354
3355         return latest;
3356 }
3357
3358 /*
3359  * this should be called twice, once with wait == 0 and
3360  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3361  * we write are pinned.
3362  *
3363  * They are released when wait == 1 is done.
3364  * max_mirrors must be the same for both runs, and it indicates how
3365  * many supers on this one device should be written.
3366  *
3367  * max_mirrors == 0 means to write them all.
3368  */
3369 static int write_dev_supers(struct btrfs_device *device,
3370                             struct btrfs_super_block *sb,
3371                             int do_barriers, int wait, int max_mirrors)
3372 {
3373         struct buffer_head *bh;
3374         int i;
3375         int ret;
3376         int errors = 0;
3377         u32 crc;
3378         u64 bytenr;
3379
3380         if (max_mirrors == 0)
3381                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3382
3383         for (i = 0; i < max_mirrors; i++) {
3384                 bytenr = btrfs_sb_offset(i);
3385                 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3386                     device->commit_total_bytes)
3387                         break;
3388
3389                 if (wait) {
3390                         bh = __find_get_block(device->bdev, bytenr / 4096,
3391                                               BTRFS_SUPER_INFO_SIZE);
3392                         if (!bh) {
3393                                 errors++;
3394                                 continue;
3395                         }
3396                         wait_on_buffer(bh);
3397                         if (!buffer_uptodate(bh))
3398                                 errors++;
3399
3400                         /* drop our reference */
3401                         brelse(bh);
3402
3403                         /* drop the reference from the wait == 0 run */
3404                         brelse(bh);
3405                         continue;
3406                 } else {
3407                         btrfs_set_super_bytenr(sb, bytenr);
3408
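                        /*
                         * The superblock checksum covers everything after the
                         * csum field itself, i.e. BTRFS_SUPER_INFO_SIZE -
                         * BTRFS_CSUM_SIZE bytes starting at BTRFS_CSUM_SIZE.
                         */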
3409                         crc = ~(u32)0;
3410                         crc = btrfs_csum_data((char *)sb +
3411                                               BTRFS_CSUM_SIZE, crc,
3412                                               BTRFS_SUPER_INFO_SIZE -
3413                                               BTRFS_CSUM_SIZE);
3414                         btrfs_csum_final(crc, sb->csum);
3415
3416                         /*
3417                          * one reference for us, and we leave it for the
3418                          * caller
3419                          */
3420                         bh = __getblk(device->bdev, bytenr / 4096,
3421                                       BTRFS_SUPER_INFO_SIZE);
3422                         if (!bh) {
3423                                 btrfs_err(device->dev_root->fs_info,
3424                                     "couldn't get super buffer head for bytenr %llu",
3425                                     bytenr);
3426                                 errors++;
3427                                 continue;
3428                         }
3429
3430                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3431
3432                         /* one reference for submit_bh */
3433                         get_bh(bh);
3434
3435                         set_buffer_uptodate(bh);
3436                         lock_buffer(bh);
3437                         bh->b_end_io = btrfs_end_buffer_write_sync;
3438                         bh->b_private = device;
3439                 }
3440
3441                 /*
3442                  * we FUA the first super.  The others we allow
3443                  * to go down lazily.
3444                  */
3445                 if (i == 0)
3446                         ret = btrfsic_submit_bh(WRITE_FUA, bh);
3447                 else
3448                         ret = btrfsic_submit_bh(WRITE_SYNC, bh);
3449                 if (ret)
3450                         errors++;
3451         }
3452         return errors < i ? 0 : -1;
3453 }
3454
3455 /*
3456  * endio for write_dev_flush; this will wake anyone waiting
3457  * for the barrier when it is done
3458  */
3459 static void btrfs_end_empty_barrier(struct bio *bio)
3460 {
3461         if (bio->bi_private)
3462                 complete(bio->bi_private);
3463         bio_put(bio);
3464 }
3465
3466 /*
3467  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
3468  * are sent down.  With wait == 1, it waits for the previous flush.
3469  *
3470  * any device where the flush fails with eopnotsupp is flagged as not-barrier
3471  * capable
3472  */
3473 static int write_dev_flush(struct btrfs_device *device, int wait)
3474 {
3475         struct bio *bio;
3476         int ret = 0;
3477
3478         if (device->nobarriers)
3479                 return 0;
3480
3481         if (wait) {
3482                 bio = device->flush_bio;
3483                 if (!bio)
3484                         return 0;
3485
3486                 wait_for_completion(&device->flush_wait);
3487
3488                 if (bio->bi_error) {
3489                         ret = bio->bi_error;
3490                         btrfs_dev_stat_inc_and_print(device,
3491                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3492                 }
3493
3494                 /* drop the reference from the wait == 0 run */
3495                 bio_put(bio);
3496                 device->flush_bio = NULL;
3497
3498                 return ret;
3499         }
3500
3501         /*
3502          * one reference for us, and we leave it for the
3503          * caller
3504          */
3505         device->flush_bio = NULL;
3506         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3507         if (!bio)
3508                 return -ENOMEM;
3509
3510         bio->bi_end_io = btrfs_end_empty_barrier;
3511         bio->bi_bdev = device->bdev;
3512         init_completion(&device->flush_wait);
3513         bio->bi_private = &device->flush_wait;
3514         device->flush_bio = bio;
3515
3516         bio_get(bio);
3517         btrfsic_submit_bio(WRITE_FLUSH, bio);
3518
3519         return 0;
3520 }
3521
3522 /*
3523  * send an empty flush down to each device in parallel,
3524  * then wait for them
3525  */
3526 static int barrier_all_devices(struct btrfs_fs_info *info)
3527 {
3528         struct list_head *head;
3529         struct btrfs_device *dev;
3530         int errors_send = 0;
3531         int errors_wait = 0;
3532         int ret;
3533
3534         /* send down all the barriers */
3535         head = &info->fs_devices->devices;
3536         list_for_each_entry_rcu(dev, head, dev_list) {
3537                 if (dev->missing)
3538                         continue;
3539                 if (!dev->bdev) {
3540                         errors_send++;
3541                         continue;
3542                 }
3543                 if (!dev->in_fs_metadata || !dev->writeable)
3544                         continue;
3545
3546                 ret = write_dev_flush(dev, 0);
3547                 if (ret)
3548                         errors_send++;
3549         }
3550
3551         /* wait for all the barriers */
3552         list_for_each_entry_rcu(dev, head, dev_list) {
3553                 if (dev->missing)
3554                         continue;
3555                 if (!dev->bdev) {
3556                         errors_wait++;
3557                         continue;
3558                 }
3559                 if (!dev->in_fs_metadata || !dev->writeable)
3560                         continue;
3561
3562                 ret = write_dev_flush(dev, 1);
3563                 if (ret)
3564                         errors_wait++;
3565         }
3566         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3567             errors_wait > info->num_tolerated_disk_barrier_failures)
3568                 return -EIO;
3569         return 0;
3570 }
3571
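/*
 * Return the minimum number of device failures that can be tolerated by
 * any of the raid profiles selected in @flags, based on the per-profile
 * tolerated_failures values in btrfs_raid_array.
 */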
3572 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3573 {
3574         int raid_type;
3575         int min_tolerated = INT_MAX;
3576
3577         if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3578             (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3579                 min_tolerated = min(min_tolerated,
3580                                     btrfs_raid_array[BTRFS_RAID_SINGLE].
3581                                     tolerated_failures);
3582
3583         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3584                 if (raid_type == BTRFS_RAID_SINGLE)
3585                         continue;
3586                 if (!(flags & btrfs_raid_group[raid_type]))
3587                         continue;
3588                 min_tolerated = min(min_tolerated,
3589                                     btrfs_raid_array[raid_type].
3590                                     tolerated_failures);
3591         }
3592
3593         if (min_tolerated == INT_MAX) {
3594                 pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
3595                 min_tolerated = 0;
3596         }
3597
3598         return min_tolerated;
3599 }
3600
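/*
 * Walk all space_infos (data, system, metadata and mixed) and all of their
 * allocated block group profiles, and return the smallest number of device
 * failures tolerated by any profile that actually holds data.
 */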
3601 int btrfs_calc_num_tolerated_disk_barrier_failures(
3602         struct btrfs_fs_info *fs_info)
3603 {
3604         struct btrfs_ioctl_space_info space;
3605         struct btrfs_space_info *sinfo;
3606         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3607                        BTRFS_BLOCK_GROUP_SYSTEM,
3608                        BTRFS_BLOCK_GROUP_METADATA,
3609                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3610         int i;
3611         int c;
3612         int num_tolerated_disk_barrier_failures =
3613                 (int)fs_info->fs_devices->num_devices;
3614
3615         for (i = 0; i < ARRAY_SIZE(types); i++) {
3616                 struct btrfs_space_info *tmp;
3617
3618                 sinfo = NULL;
3619                 rcu_read_lock();
3620                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3621                         if (tmp->flags == types[i]) {
3622                                 sinfo = tmp;
3623                                 break;
3624                         }
3625                 }
3626                 rcu_read_unlock();
3627
3628                 if (!sinfo)
3629                         continue;
3630
3631                 down_read(&sinfo->groups_sem);
3632                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3633                         u64 flags;
3634
3635                         if (list_empty(&sinfo->block_groups[c]))
3636                                 continue;
3637
3638                         btrfs_get_block_group_info(&sinfo->block_groups[c],
3639                                                    &space);
3640                         if (space.total_bytes == 0 || space.used_bytes == 0)
3641                                 continue;
3642                         flags = space.flags;
3643
3644                         num_tolerated_disk_barrier_failures = min(
3645                                 num_tolerated_disk_barrier_failures,
3646                                 btrfs_get_num_tolerated_disk_barrier_failures(
3647                                         flags));
3648                 }
3649                 up_read(&sinfo->groups_sem);
3650         }
3651
3652         return num_tolerated_disk_barrier_failures;
3653 }
3654
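/*
 * Write the superblock to every writable device that is part of the
 * filesystem metadata.  Barriers are sent first (unless nobarrier is set),
 * then the supers are submitted and waited for in two passes via
 * write_dev_supers().  Up to max_errors device failures are tolerated.
 */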
3655 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3656 {
3657         struct list_head *head;
3658         struct btrfs_device *dev;
3659         struct btrfs_super_block *sb;
3660         struct btrfs_dev_item *dev_item;
3661         int ret;
3662         int do_barriers;
3663         int max_errors;
3664         int total_errors = 0;
3665         u64 flags;
3666
3667         do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
3668         backup_super_roots(root->fs_info);
3669
3670         sb = root->fs_info->super_for_commit;
3671         dev_item = &sb->dev_item;
3672
3673         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3674         head = &root->fs_info->fs_devices->devices;
3675         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3676
3677         if (do_barriers) {
3678                 ret = barrier_all_devices(root->fs_info);
3679                 if (ret) {
3680                         mutex_unlock(
3681                                 &root->fs_info->fs_devices->device_list_mutex);
3682                         btrfs_handle_fs_error(root->fs_info, ret,
3683                                     "errors while submitting device barriers.");
3684                         return ret;
3685                 }
3686         }
3687
3688         list_for_each_entry_rcu(dev, head, dev_list) {
3689                 if (!dev->bdev) {
3690                         total_errors++;
3691                         continue;
3692                 }
3693                 if (!dev->in_fs_metadata || !dev->writeable)
3694                         continue;
3695
3696                 btrfs_set_stack_device_generation(dev_item, 0);
3697                 btrfs_set_stack_device_type(dev_item, dev->type);
3698                 btrfs_set_stack_device_id(dev_item, dev->devid);
3699                 btrfs_set_stack_device_total_bytes(dev_item,
3700                                                    dev->commit_total_bytes);
3701                 btrfs_set_stack_device_bytes_used(dev_item,
3702                                                   dev->commit_bytes_used);
3703                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3704                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3705                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3706                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3707                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3708
3709                 flags = btrfs_super_flags(sb);
3710                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3711
3712                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3713                 if (ret)
3714                         total_errors++;
3715         }
3716         if (total_errors > max_errors) {
3717                 btrfs_err(root->fs_info, "%d errors while writing supers",
3718                        total_errors);
3719                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3720
3721                 /* FUA is masked off if unsupported and can't be the reason */
3722                 btrfs_handle_fs_error(root->fs_info, -EIO,
3723                             "%d errors while writing supers", total_errors);
3724                 return -EIO;
3725         }
3726
3727         total_errors = 0;
3728         list_for_each_entry_rcu(dev, head, dev_list) {
3729                 if (!dev->bdev)
3730                         continue;
3731                 if (!dev->in_fs_metadata || !dev->writeable)
3732                         continue;
3733
3734                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3735                 if (ret)
3736                         total_errors++;
3737         }
3738         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3739         if (total_errors > max_errors) {
3740                 btrfs_handle_fs_error(root->fs_info, -EIO,
3741                             "%d errors while writing supers", total_errors);
3742                 return -EIO;
3743         }
3744         return 0;
3745 }
3746
3747 int write_ctree_super(struct btrfs_trans_handle *trans,
3748                       struct btrfs_root *root, int max_mirrors)
3749 {
3750         return write_all_supers(root, max_mirrors);
3751 }
3752
3753 /* Drop a fs root from the radix tree and free it. */
3754 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3755                                   struct btrfs_root *root)
3756 {
3757         spin_lock(&fs_info->fs_roots_radix_lock);
3758         radix_tree_delete(&fs_info->fs_roots_radix,
3759                           (unsigned long)root->root_key.objectid);
3760         spin_unlock(&fs_info->fs_roots_radix_lock);
3761
3762         if (btrfs_root_refs(&root->root_item) == 0)
3763                 synchronize_srcu(&fs_info->subvol_srcu);
3764
3765         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3766                 btrfs_free_log(NULL, root);
3767                 if (root->reloc_root) {
3768                         free_extent_buffer(root->reloc_root->node);
3769                         free_extent_buffer(root->reloc_root->commit_root);
3770                         btrfs_put_fs_root(root->reloc_root);
3771                         root->reloc_root = NULL;
3772                 }
3773         }
3774
3775         if (root->free_ino_pinned)
3776                 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3777         if (root->free_ino_ctl)
3778                 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3779         free_fs_root(root);
3780 }
3781
3782 static void free_fs_root(struct btrfs_root *root)
3783 {
3784         iput(root->ino_cache_inode);
3785         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3786         btrfs_free_block_rsv(root, root->orphan_block_rsv);
3787         root->orphan_block_rsv = NULL;
3788         if (root->anon_dev)
3789                 free_anon_bdev(root->anon_dev);
3790         if (root->subv_writers)
3791                 btrfs_free_subvolume_writers(root->subv_writers);
3792         free_extent_buffer(root->node);
3793         free_extent_buffer(root->commit_root);
3794         kfree(root->free_ino_ctl);
3795         kfree(root->free_ino_pinned);
3796         kfree(root->name);
3797         btrfs_put_fs_root(root);
3798 }
3799
3800 void btrfs_free_fs_root(struct btrfs_root *root)
3801 {
3802         free_fs_root(root);
3803 }
3804
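/*
 * Walk the fs_roots radix tree in batches of eight under SRCU protection,
 * grab a reference on each live root and run orphan cleanup on it.  Roots
 * with zero refs (already on dead_roots) are skipped.
 */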
3805 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3806 {
3807         u64 root_objectid = 0;
3808         struct btrfs_root *gang[8];
3809         int i = 0;
3810         int err = 0;
3811         unsigned int ret = 0;
3812         int index;
3813
3814         while (1) {
3815                 index = srcu_read_lock(&fs_info->subvol_srcu);
3816                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3817                                              (void **)gang, root_objectid,
3818                                              ARRAY_SIZE(gang));
3819                 if (!ret) {
3820                         srcu_read_unlock(&fs_info->subvol_srcu, index);
3821                         break;
3822                 }
3823                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3824
3825                 for (i = 0; i < ret; i++) {
3826                         /* Avoid grabbing roots in dead_roots */
3827                         if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3828                                 gang[i] = NULL;
3829                                 continue;
3830                         }
3831                         /* grab all the search results for later use */
3832                         gang[i] = btrfs_grab_fs_root(gang[i]);
3833                 }
3834                 srcu_read_unlock(&fs_info->subvol_srcu, index);
3835
3836                 for (i = 0; i < ret; i++) {
3837                         if (!gang[i])
3838                                 continue;
3839                         root_objectid = gang[i]->root_key.objectid;
3840                         err = btrfs_orphan_cleanup(gang[i]);
3841                         if (err)
3842                                 break;
3843                         btrfs_put_fs_root(gang[i]);
3844                 }
3845                 root_objectid++;
3846         }
3847
3848         /* release the uncleaned roots due to error */
3849         for (; i < ret; i++) {
3850                 if (gang[i])
3851                         btrfs_put_fs_root(gang[i]);
3852         }
3853         return err;
3854 }
3855
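/*
 * Flush everything to disk before unmount: run the delayed iputs, wait for
 * any ongoing cleanup work to finish, then join and commit a transaction.
 */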
3856 int btrfs_commit_super(struct btrfs_root *root)
3857 {
3858         struct btrfs_trans_handle *trans;
3859
3860         mutex_lock(&root->fs_info->cleaner_mutex);
3861         btrfs_run_delayed_iputs(root);
3862         mutex_unlock(&root->fs_info->cleaner_mutex);
3863         wake_up_process(root->fs_info->cleaner_kthread);
3864
3865         /* wait until ongoing cleanup work is done */
3866         down_write(&root->fs_info->cleanup_work_sem);
3867         up_write(&root->fs_info->cleanup_work_sem);
3868
3869         trans = btrfs_join_transaction(root);
3870         if (IS_ERR(trans))
3871                 return PTR_ERR(trans);
3872         return btrfs_commit_transaction(trans, root);
3873 }
3874
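/*
 * Tear down a mounted filesystem: stop background work (qgroup rescan,
 * uuid scan, balance, dev-replace, scrub, defrag), commit the final
 * transaction for read-write mounts, stop the kthreads and workers, and
 * free all remaining in-memory state.
 */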
3875 void close_ctree(struct btrfs_root *root)
3876 {
3877         struct btrfs_fs_info *fs_info = root->fs_info;
3878         int ret;
3879
3880         fs_info->closing = 1;
3881         smp_mb();
3882
3883         /* wait for the qgroup rescan worker to stop */
3884         btrfs_qgroup_wait_for_completion(fs_info, false);
3885
3886         /* wait for the uuid_scan task to finish */
3887         down(&fs_info->uuid_tree_rescan_sem);
3888         /* avoid complaints from lockdep et al.; set sem back to initial state */
3889         up(&fs_info->uuid_tree_rescan_sem);
3890
3891         /* pause restriper - we want to resume on mount */
3892         btrfs_pause_balance(fs_info);
3893
3894         btrfs_dev_replace_suspend_for_unmount(fs_info);
3895
3896         btrfs_scrub_cancel(fs_info);
3897
3898         /* wait for any defraggers to finish */
3899         wait_event(fs_info->transaction_wait,
3900                    (atomic_read(&fs_info->defrag_running) == 0));
3901
3902         /* clear out the rbtree of defraggable inodes */
3903         btrfs_cleanup_defrag_inodes(fs_info);
3904
3905         cancel_work_sync(&fs_info->async_reclaim_work);
3906
3907         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3908                 /*
3909                  * If the cleaner thread is stopped and there are
3910                  * block groups queued for removal, the deletion will be
3911                  * skipped when we quit the cleaner thread.
3912                  */
3913                 btrfs_delete_unused_bgs(root->fs_info);
3914
3915                 ret = btrfs_commit_super(root);
3916                 if (ret)
3917                         btrfs_err(fs_info, "commit super ret %d", ret);
3918         }
3919
3920         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3921                 btrfs_error_commit_super(root);
3922
3923         kthread_stop(fs_info->transaction_kthread);
3924         kthread_stop(fs_info->cleaner_kthread);
3925
3926         fs_info->closing = 2;
3927         smp_mb();
3928
3929         btrfs_free_qgroup_config(fs_info);
3930
3931         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3932                 btrfs_info(fs_info, "at unmount delalloc count %lld",
3933                        percpu_counter_sum(&fs_info->delalloc_bytes));
3934         }
3935
3936         btrfs_sysfs_remove_mounted(fs_info);
3937         btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3938
3939         btrfs_free_fs_roots(fs_info);
3940
3941         btrfs_put_block_group_cache(fs_info);
3942
3943         btrfs_free_block_groups(fs_info);
3944
3945         /*
3946          * we must make sure there is no read request to
3947          * submit after we stop all workers.
3948          */
3949         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3950         btrfs_stop_all_workers(fs_info);
3951
3952         fs_info->open = 0;
3953         free_root_pointers(fs_info, 1);
3954
3955         iput(fs_info->btree_inode);
3956
3957 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3958         if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
3959                 btrfsic_unmount(root, fs_info->fs_devices);
3960 #endif
3961
3962         btrfs_close_devices(fs_info->fs_devices);
3963         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3964
3965         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3966         percpu_counter_destroy(&fs_info->delalloc_bytes);
3967         percpu_counter_destroy(&fs_info->bio_counter);
3968         bdi_destroy(&fs_info->bdi);
3969         cleanup_srcu_struct(&fs_info->subvol_srcu);
3970
3971         btrfs_free_stripe_hash_table(fs_info);
3972
3973         __btrfs_free_block_rsv(root->orphan_block_rsv);
3974         root->orphan_block_rsv = NULL;
3975
3976         lock_chunks(root);
3977         while (!list_empty(&fs_info->pinned_chunks)) {
3978                 struct extent_map *em;
3979
3980                 em = list_first_entry(&fs_info->pinned_chunks,
3981                                       struct extent_map, list);
3982                 list_del_init(&em->list);
3983                 free_extent_map(em);
3984         }
3985         unlock_chunks(root);
3986 }
3987
3988 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3989                           int atomic)
3990 {
3991         int ret;
3992         struct inode *btree_inode = buf->pages[0]->mapping->host;
3993
3994         ret = extent_buffer_uptodate(buf);
3995         if (!ret)
3996                 return ret;
3997
3998         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3999                                     parent_transid, atomic);
4000         if (ret == -EAGAIN)
4001                 return ret;
4002         return !ret;
4003 }
4004
4005 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4006 {
4007         struct btrfs_root *root;
4008         u64 transid = btrfs_header_generation(buf);
4009         int was_dirty;
4010
4011 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4012         /*
4013          * This is a fast path so only do this check if we have sanity tests
4014          * enabled.  Normal people shouldn't be marking dummy buffers as dirty
4015          * outside of the sanity tests.
4016          */
4017         if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
4018                 return;
4019 #endif
4020         root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4021         btrfs_assert_tree_locked(buf);
4022         if (transid != root->fs_info->generation)
4023                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
4024                        "found %llu running %llu\n",
4025                         buf->start, transid, root->fs_info->generation);
4026         was_dirty = set_extent_buffer_dirty(buf);
4027         if (!was_dirty)
4028                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
4029                                      buf->len,
4030                                      root->fs_info->dirty_metadata_batch);
4031 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4032         if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4033                 btrfs_print_leaf(root, buf);
4034                 ASSERT(0);
4035         }
4036 #endif
4037 }
4038
4039 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
4040                                         int flush_delayed)
4041 {
4042         /*
4043          * looks as though older kernels can get into trouble with
4044          * this code; they end up stuck in balance_dirty_pages forever
4045          */
4046         int ret;
4047
4048         if (current->flags & PF_MEMALLOC)
4049                 return;
4050
4051         if (flush_delayed)
4052                 btrfs_balance_delayed_items(root);
4053
4054         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
4055                                      BTRFS_DIRTY_METADATA_THRESH);
4056         if (ret > 0) {
4057                 balance_dirty_pages_ratelimited(
4058                                    root->fs_info->btree_inode->i_mapping);
4059         }
4060 }
4061
4062 void btrfs_btree_balance_dirty(struct btrfs_root *root)
4063 {
4064         __btrfs_btree_balance_dirty(root, 1);
4065 }
4066
4067 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
4068 {
4069         __btrfs_btree_balance_dirty(root, 0);
4070 }
4071
4072 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4073 {
4074         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4075         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
4076 }
4077
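/*
 * Sanity-check the superblock fields read from disk (magic, tree levels,
 * sector/node sizes, alignment, fsid, device counts, sys_chunk_array size)
 * before the rest of open_ctree relies on them.  All checks are performed
 * so every problem gets reported; any hard failure makes the function
 * return -EINVAL, while warning-only checks do not fail the mount.
 */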
4078 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4079                               int read_only)
4080 {
4081         struct btrfs_super_block *sb = fs_info->super_copy;
4082         u64 nodesize = btrfs_super_nodesize(sb);
4083         u64 sectorsize = btrfs_super_sectorsize(sb);
4084         int ret = 0;
4085
4086         if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4087                 printk(KERN_ERR "BTRFS: no valid FS found\n");
4088                 ret = -EINVAL;
4089         }
4090         if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4091                 printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
4092                                 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4093         if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4094                 printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
4095                                 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4096                 ret = -EINVAL;
4097         }
4098         if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4099                 printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
4100                                 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4101                 ret = -EINVAL;
4102         }
4103         if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4104                 printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
4105                                 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4106                 ret = -EINVAL;
4107         }
4108
4109         /*
4110          * Check sectorsize and nodesize first; other checks will need them.
4111          * Allow all possible sector sizes (4K, 8K, 16K, 32K, 64K) here.
4112          */
4113         if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4114             sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4115                 printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
4116                 ret = -EINVAL;
4117         }
4118         /* Only PAGE SIZE is supported yet */
4119         /* Only PAGE_SIZE is supported so far */
4120                 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
4121                                 sectorsize, PAGE_SIZE);
4122                 ret = -EINVAL;
4123         }
4124         if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4125             nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4126                 printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
4127                 ret = -EINVAL;
4128         }
4129         if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4130                 printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
4131                                 le32_to_cpu(sb->__unused_leafsize),
4132                                 nodesize);
4133                 ret = -EINVAL;
4134         }
4135
4136         /* Root alignment check */
4137         if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4138                 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
4139                                 btrfs_super_root(sb));
4140                 ret = -EINVAL;
4141         }
4142         if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4143                 printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
4144                                 btrfs_super_chunk_root(sb));
4145                 ret = -EINVAL;
4146         }
4147         if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4148                 printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
4149                                 btrfs_super_log_root(sb));
4150                 ret = -EINVAL;
4151         }
4152
4153         if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
4154                 printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
4155                                 fs_info->fsid, sb->dev_item.fsid);
4156                 ret = -EINVAL;
4157         }
4158
4159         /*
4160          * Hint to catch really bogus numbers, bitflips or so; more exact
4161          * checks are done later.
4162          */
4163         if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4164                 btrfs_err(fs_info, "bytes_used is too small %llu",
4165                        btrfs_super_bytes_used(sb));
4166                 ret = -EINVAL;
4167         }
4168         if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4169                 btrfs_err(fs_info, "invalid stripesize %u",
4170                        btrfs_super_stripesize(sb));
4171                 ret = -EINVAL;
4172         }
4173         if (btrfs_super_num_devices(sb) > (1UL << 31))
4174                 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
4175                                 btrfs_super_num_devices(sb));
4176         if (btrfs_super_num_devices(sb) == 0) {
4177                 printk(KERN_ERR "BTRFS: number of devices is 0\n");
4178                 ret = -EINVAL;
4179         }
4180
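        /* The bytenr recorded in the super must be the primary superblock offset. */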
4181         if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4182                 printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
4183                                 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4184                 ret = -EINVAL;
4185         }
4186
4187         /*
4188          * Catch obvious sys_chunk_array corruption: it must hold at least
4189          * one key and one chunk.
4190          */
4191         if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4192                 printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
4193                                 btrfs_super_sys_array_size(sb),
4194                                 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4195                 ret = -EINVAL;
4196         }
4197         if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4198                         + sizeof(struct btrfs_chunk)) {
4199                 printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
4200                                 btrfs_super_sys_array_size(sb),
4201                                 sizeof(struct btrfs_disk_key)
4202                                 + sizeof(struct btrfs_chunk));
4203                 ret = -EINVAL;
4204         }
4205
4206         /*
4207          * The generation is a global counter; we'll trust it more than the
4208          * others, but it's still possible that it's the one that's wrong.
4209          */
4210         if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4211                 printk(KERN_WARNING
4212                         "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
4213                         btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
4214         if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4215             && btrfs_super_cache_generation(sb) != (u64)-1)
4216                 printk(KERN_WARNING
4217                         "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
4218                         btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
4219
4220         return ret;
4221 }
4222
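/*
 * Error-path counterpart of a normal commit: run any pending delayed iputs
 * under the cleaner mutex, wait for in-flight cleanup work to finish
 * (cleanup_work_sem), then tear down the remaining transactions via
 * btrfs_cleanup_transaction().
 */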
4223 static void btrfs_error_commit_super(struct btrfs_root *root)
4224 {
4225         mutex_lock(&root->fs_info->cleaner_mutex);
4226         btrfs_run_delayed_iputs(root);
4227         mutex_unlock(&root->fs_info->cleaner_mutex);
4228
4229         down_write(&root->fs_info->cleanup_work_sem);
4230         up_write(&root->fs_info->cleanup_work_sem);
4231
4232         /* cleanup FS via transaction */
4233         btrfs_cleanup_transaction(root);
4234 }
4235
4236 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4237 {
4238         struct btrfs_ordered_extent *ordered;
4239
4240         spin_lock(&root->ordered_extent_lock);
4241         /*
4242          * This will just short-circuit the ordered completion code, which
4243          * will make sure the ordered extent gets properly cleaned up.
4244          */
4245         list_for_each_entry(ordered, &root->ordered_extents,
4246                             root_extent_list)
4247                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4248         spin_unlock(&root->ordered_extent_lock);
4249 }
4250
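/*
 * Walk every root currently on fs_info->ordered_roots and mark its ordered
 * extents with BTRFS_ORDERED_IOERR so completion tears them down instead of
 * finishing the writes.
 */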
4251 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4252 {
4253         struct btrfs_root *root;
4254         struct list_head splice;
4255
4256         INIT_LIST_HEAD(&splice);
4257
4258         spin_lock(&fs_info->ordered_root_lock);
4259         list_splice_init(&fs_info->ordered_roots, &splice);
4260         while (!list_empty(&splice)) {
4261                 root = list_first_entry(&splice, struct btrfs_root,
4262                                         ordered_root);
4263                 list_move_tail(&root->ordered_root,
4264                                &fs_info->ordered_roots);
4265
4266                 spin_unlock(&fs_info->ordered_root_lock);
4267                 btrfs_destroy_ordered_extents(root);
4268
4269                 cond_resched();
4270                 spin_lock(&fs_info->ordered_root_lock);
4271         }
4272         spin_unlock(&fs_info->ordered_root_lock);
4273 }
4274
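/*
 * Drop every delayed ref still queued on @trans: lock each ref head, empty
 * its ref list, and re-pin any reservation left behind by a
 * must_insert_reserved head so the space isn't leaked.
 */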
4275 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4276                                       struct btrfs_root *root)
4277 {
4278         struct rb_node *node;
4279         struct btrfs_delayed_ref_root *delayed_refs;
4280         struct btrfs_delayed_ref_node *ref;
4281         int ret = 0;
4282
4283         delayed_refs = &trans->delayed_refs;
4284
4285         spin_lock(&delayed_refs->lock);
4286         if (atomic_read(&delayed_refs->num_entries) == 0) {
4287                 spin_unlock(&delayed_refs->lock);
4288                 btrfs_info(root->fs_info, "delayed_refs has NO entry");
4289                 return ret;
4290         }
4291
4292         while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4293                 struct btrfs_delayed_ref_head *head;
4294                 struct btrfs_delayed_ref_node *tmp;
4295                 bool pin_bytes = false;
4296
4297                 head = rb_entry(node, struct btrfs_delayed_ref_head,
4298                                 href_node);
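                /*
                 * If someone else holds the head's mutex, grab a reference,
                 * wait for them to drop it, then retry from the top.
                 */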
4299                 if (!mutex_trylock(&head->mutex)) {
4300                         atomic_inc(&head->node.refs);
4301                         spin_unlock(&delayed_refs->lock);
4302
4303                         mutex_lock(&head->mutex);
4304                         mutex_unlock(&head->mutex);
4305                         btrfs_put_delayed_ref(&head->node);
4306                         spin_lock(&delayed_refs->lock);
4307                         continue;
4308                 }
4309                 spin_lock(&head->lock);
4310                 list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4311                                                  list) {
4312                         ref->in_tree = 0;
4313                         list_del(&ref->list);
4314                         atomic_dec(&delayed_refs->num_entries);
4315                         btrfs_put_delayed_ref(ref);
4316                 }
4317                 if (head->must_insert_reserved)
4318                         pin_bytes = true;
4319                 btrfs_free_delayed_extent_op(head->extent_op);
4320                 delayed_refs->num_heads--;
4321                 if (head->processing == 0)
4322                         delayed_refs->num_heads_ready--;
4323                 atomic_dec(&delayed_refs->num_entries);
4324                 head->node.in_tree = 0;
4325                 rb_erase(&head->href_node, &delayed_refs->href_root);
4326                 spin_unlock(&head->lock);
4327                 spin_unlock(&delayed_refs->lock);
4328                 mutex_unlock(&head->mutex);
4329
4330                 if (pin_bytes)
4331                         btrfs_pin_extent(root, head->node.bytenr,
4332                                          head->node.num_bytes, 1);
4333                 btrfs_put_delayed_ref(&head->node);
4334                 cond_resched();
4335                 spin_lock(&delayed_refs->lock);
4336         }
4337
4338         spin_unlock(&delayed_refs->lock);
4339
4340         return ret;
4341 }
4342
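/*
 * Empty the per-root delalloc list: unhook each inode, clear its
 * BTRFS_INODE_IN_DELALLOC_LIST bit and invalidate the owning root's inodes
 * so no further writeback is attempted.
 */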
4343 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4344 {
4345         struct btrfs_inode *btrfs_inode;
4346         struct list_head splice;
4347
4348         INIT_LIST_HEAD(&splice);
4349
4350         spin_lock(&root->delalloc_lock);
4351         list_splice_init(&root->delalloc_inodes, &splice);
4352
4353         while (!list_empty(&splice)) {
4354                 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4355                                                delalloc_inodes);
4356
4357                 list_del_init(&btrfs_inode->delalloc_inodes);
4358                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4359                           &btrfs_inode->runtime_flags);
4360                 spin_unlock(&root->delalloc_lock);
4361
4362                 btrfs_invalidate_inodes(btrfs_inode->root);
4363
4364                 spin_lock(&root->delalloc_lock);
4365         }
4366
4367         spin_unlock(&root->delalloc_lock);
4368 }
4369
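/*
 * Splice fs_info->delalloc_roots off the fs and run
 * btrfs_destroy_delalloc_inodes() on each root, holding a root reference
 * while the delalloc_root_lock is dropped.
 */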
4370 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4371 {
4372         struct btrfs_root *root;
4373         struct list_head splice;
4374
4375         INIT_LIST_HEAD(&splice);
4376
4377         spin_lock(&fs_info->delalloc_root_lock);
4378         list_splice_init(&fs_info->delalloc_roots, &splice);
4379         while (!list_empty(&splice)) {
4380                 root = list_first_entry(&splice, struct btrfs_root,
4381                                          delalloc_root);
4382                 list_del_init(&root->delalloc_root);
4383                 root = btrfs_grab_fs_root(root);
4384                 BUG_ON(!root);
4385                 spin_unlock(&fs_info->delalloc_root_lock);
4386
4387                 btrfs_destroy_delalloc_inodes(root);
4388                 btrfs_put_fs_root(root);
4389
4390                 spin_lock(&fs_info->delalloc_root_lock);
4391         }
4392         spin_unlock(&fs_info->delalloc_root_lock);
4393 }
4394
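/*
 * Clear @mark from @dirty_pages and drop the dirty bit on every extent
 * buffer in the cleared ranges so the buffers can be freed without being
 * written back.
 */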
4395 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
4396                                         struct extent_io_tree *dirty_pages,
4397                                         int mark)
4398 {
4399         int ret;
4400         struct extent_buffer *eb;
4401         u64 start = 0;
4402         u64 end;
4403
4404         while (1) {
4405                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4406                                             mark, NULL);
4407                 if (ret)
4408                         break;
4409
4410                 clear_extent_bits(dirty_pages, start, end, mark);
4411                 while (start <= end) {
4412                         eb = btrfs_find_tree_block(root->fs_info, start);
4413                         start += root->nodesize;
4414                         if (!eb)
4415                                 continue;
4416                         wait_on_extent_buffer_writeback(eb);
4417
4418                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4419                                                &eb->bflags))
4420                                 clear_extent_buffer_dirty(eb);
4421                         free_extent_buffer_stale(eb);
4422                 }
4423         }
4424
4425         return ret;
4426 }
4427
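/*
 * Discard everything still marked EXTENT_DIRTY in the pinned extent trees.
 * Both freed_extents trees are walked, since @pinned_extents is expected to
 * point at whichever of the two the current transaction was using.
 */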
4428 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
4429                                        struct extent_io_tree *pinned_extents)
4430 {
4431         struct extent_io_tree *unpin;
4432         u64 start;
4433         u64 end;
4434         int ret;
4435         bool loop = true;
4436
4437         unpin = pinned_extents;
4438 again:
4439         while (1) {
4440                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4441                                             EXTENT_DIRTY, NULL);
4442                 if (ret)
4443                         break;
4444
4445                 clear_extent_dirty(unpin, start, end);
4446                 btrfs_error_unpin_extent_range(root, start, end);
4447                 cond_resched();
4448         }
4449
4450         if (loop) {
4451                 if (unpin == &root->fs_info->freed_extents[0])
4452                         unpin = &root->fs_info->freed_extents[1];
4453                 else
4454                         unpin = &root->fs_info->freed_extents[0];
4455                 loop = false;
4456                 goto again;
4457         }
4458
4459         return 0;
4460 }
4461
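/*
 * Tear down a single aborted transaction: drop its delayed refs and delayed
 * inodes, destroy its dirty and pinned extents, and walk its state forward
 * so anyone blocked waiting on this transaction gets woken up.
 */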
4462 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4463                                    struct btrfs_root *root)
4464 {
4465         btrfs_destroy_delayed_refs(cur_trans, root);
4466
4467         cur_trans->state = TRANS_STATE_COMMIT_START;
4468         wake_up(&root->fs_info->transaction_blocked_wait);
4469
4470         cur_trans->state = TRANS_STATE_UNBLOCKED;
4471         wake_up(&root->fs_info->transaction_wait);
4472
4473         btrfs_destroy_delayed_inodes(root);
4474         btrfs_assert_delayed_root_empty(root);
4475
4476         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
4477                                      EXTENT_DIRTY);
4478         btrfs_destroy_pinned_extent(root,
4479                                     root->fs_info->pinned_extents);
4480
4481         cur_trans->state = TRANS_STATE_COMPLETED;
4482         wake_up(&cur_trans->commit_wait);
4483
4484         /*
4485         memset(cur_trans, 0, sizeof(*cur_trans));
4486         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
4487         */
4488 }
4489
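/*
 * Called from btrfs_error_commit_super(): wait for any transaction that has
 * already started committing, clean up every remaining transaction on
 * fs_info->trans_list, then flush the leftover ordered extents, delayed
 * inodes, pinned extents and delalloc inodes.
 */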
4490 static int btrfs_cleanup_transaction(struct btrfs_root *root)
4491 {
4492         struct btrfs_transaction *t;
4493
4494         mutex_lock(&root->fs_info->transaction_kthread_mutex);
4495
4496         spin_lock(&root->fs_info->trans_lock);
4497         while (!list_empty(&root->fs_info->trans_list)) {
4498                 t = list_first_entry(&root->fs_info->trans_list,
4499                                      struct btrfs_transaction, list);
4500                 if (t->state >= TRANS_STATE_COMMIT_START) {
4501                         atomic_inc(&t->use_count);
4502                         spin_unlock(&root->fs_info->trans_lock);
4503                         btrfs_wait_for_commit(root, t->transid);
4504                         btrfs_put_transaction(t);
4505                         spin_lock(&root->fs_info->trans_lock);
4506                         continue;
4507                 }
4508                 if (t == root->fs_info->running_transaction) {
4509                         t->state = TRANS_STATE_COMMIT_DOING;
4510                         spin_unlock(&root->fs_info->trans_lock);
4511                         /*
4512                          * We wait for num_writers to hit 0 since we don't
4513                          * currently hold a trans handle open for this transaction.
4514                          */
4515                         wait_event(t->writer_wait,
4516                                    atomic_read(&t->num_writers) == 0);
4517                 } else {
4518                         spin_unlock(&root->fs_info->trans_lock);
4519                 }
4520                 btrfs_cleanup_one_transaction(t, root);
4521
4522                 spin_lock(&root->fs_info->trans_lock);
4523                 if (t == root->fs_info->running_transaction)
4524                         root->fs_info->running_transaction = NULL;
4525                 list_del_init(&t->list);
4526                 spin_unlock(&root->fs_info->trans_lock);
4527
4528                 btrfs_put_transaction(t);
4529                 trace_btrfs_transaction_commit(root);
4530                 spin_lock(&root->fs_info->trans_lock);
4531         }
4532         spin_unlock(&root->fs_info->trans_lock);
4533         btrfs_destroy_all_ordered_extents(root->fs_info);
4534         btrfs_destroy_delayed_inodes(root);
4535         btrfs_assert_delayed_root_empty(root);
4536         btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
4537         btrfs_destroy_all_delalloc_inodes(root->fs_info);
4538         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4539
4540         return 0;
4541 }
4542
4543 static const struct extent_io_ops btree_extent_io_ops = {
4544         .readpage_end_io_hook = btree_readpage_end_io_hook,
4545         .readpage_io_failed_hook = btree_io_failed_hook,
4546         .submit_bio_hook = btree_submit_bio_hook,
4547         /* note we're sharing with inode.c for the merge bio hook */
4548         .merge_bio_hook = btrfs_merge_bio_hook,
4549 };