btrfs: don't BUG_ON btrfs_alloc_path() errors
[cascardo/linux.git] fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

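/*
 * has the background caching of this block group finished?  the smp_mb()
 * pairs with the updates of cache->cached made elsewhere under
 * cache->lock, so we can read a stable value here without taking the
 * lock ourselves.
 */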
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

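/*
 * super block copies live at fixed offsets inside some block groups;
 * carve those byte ranges (and anything below BTRFS_SUPER_INFO_OFFSET)
 * out of the block group so they are never handed out as free space.
 */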
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

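/*
 * Worker thread that builds the in-memory free space cache for one block
 * group: it scans the extent tree's commit root and feeds every gap
 * between allocated extents to add_new_free_space().  Waiters on
 * caching_ctl->wait are woken each time roughly 2MB of free space has
 * been found, and again when caching finishes.
 */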
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

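/*
 * Start caching a block group.  First try the "fast" path of loading the
 * free space cache written to disk (only attempted when it is safe to do
 * the read, per the comment inside); if that fails and load_cache_only
 * is not set, kick off caching_kthread() to rebuild the free space cache
 * from the extent tree.
 */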
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1) {
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

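/*
 * div_factor(num, f) scales num by f tenths, e.g. div_factor(1000, 9)
 * returns 900.  div_factor_fine() below does the same in hundredths.
 */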
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * queued delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block
 * through a b-tree search. Full back refs are for pointers in tree blocks
 * not referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have to
 * update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of the key only. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

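/*
 * A concrete (entirely made-up) example of the key composition described
 * above: a 4096-byte data extent at bytenr 13631488, referenced by inode
 * 257 at file offset 0 in the tree of subvolume root 5, has the extent
 * item key
 *
 *     (13631488, BTRFS_EXTENT_ITEM_KEY, 4096)
 *
 * and its implicit back ref is keyed as
 *
 *     (13631488, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * Were the extent referenced through a shared tree block at bytenr P
 * instead, the back ref key would be
 * (13631488, BTRFS_SHARED_DATA_REF_KEY, P).  The numbers are only
 * illustrative; the structure is what matters.
 */
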
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

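/*
 * the hash used as the key offset for implicit data back refs: a crc32c
 * of the root objectid supplies the high bits and a crc32c over owner
 * and offset supplies the low bits, combined as (high << 31) ^ low.
 */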
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

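/*
 * Find the extent data ref item for (bytenr, root, owner, offset), or the
 * shared data ref keyed by parent.  Because different (root, owner,
 * offset) tuples can hash to the same key offset, the non-shared case may
 * have to walk forward over neighbouring items; if that walk crossed into
 * the next leaf (recow), the match is searched for again so the returned
 * path has been properly COWed.
 */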
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

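/*
 * Insert a new data ref item, or bump the count of an existing one.  Hash
 * collisions show up here as -EEXIST on a non-matching item; in that case
 * the key offset is incremented and the insert retried until a matching
 * item or a free slot is found.
 */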
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

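/*
 * Drop refs_to_drop references from the data ref item the path points at,
 * deleting the item entirely once its count reaches zero.
 */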
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

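/*
 * Tree block back refs carry no payload beyond the key itself, so lookup
 * here and insert (below) are plain item searches and insertions.  A
 * positive search result means the ref is missing and maps to -ENOENT.
 */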
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

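/*
 * Map (parent, owner) to the back ref key type:
 *
 *                     parent == 0              parent != 0
 *   tree block    TREE_BLOCK_REF_KEY     SHARED_BLOCK_REF_KEY
 *   data extent   EXTENT_DATA_REF_KEY    SHARED_DATA_REF_KEY
 *
 * owners below BTRFS_FIRST_FREE_OBJECTID are tree roots, i.e. the
 * extent is a tree block.
 */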
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

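/*
 * Return in *key the smallest key in the tree strictly after the current
 * path position, without moving the path.  Returns 1 if the path already
 * points at the last key.
 */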
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

1375 /*
1376  * look for the inline back ref.  if the back ref is found, *ref_ret
1377  * is set to the address of the inline back ref, and 0 is returned.
1378  *
1379  * if the back ref isn't found, *ref_ret is set to the address where
1380  * it should be inserted, and -ENOENT is returned.
1381  *
1382  * if insert is true and there are too many inline back refs, the path
1383  * points to the extent item, and -EAGAIN is returned.
1384  *
1385  * NOTE: inline back refs are ordered in the same way that back ref
1386  *       items in the tree are ordered.
1387  */
1388 static noinline_for_stack
1389 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1390                                  struct btrfs_root *root,
1391                                  struct btrfs_path *path,
1392                                  struct btrfs_extent_inline_ref **ref_ret,
1393                                  u64 bytenr, u64 num_bytes,
1394                                  u64 parent, u64 root_objectid,
1395                                  u64 owner, u64 offset, int insert)
1396 {
1397         struct btrfs_key key;
1398         struct extent_buffer *leaf;
1399         struct btrfs_extent_item *ei;
1400         struct btrfs_extent_inline_ref *iref;
1401         u64 flags;
1402         u64 item_size;
1403         unsigned long ptr;
1404         unsigned long end;
1405         int extra_size;
1406         int type;
1407         int want;
1408         int ret;
1409         int err = 0;
1410
1411         key.objectid = bytenr;
1412         key.type = BTRFS_EXTENT_ITEM_KEY;
1413         key.offset = num_bytes;
1414
1415         want = extent_ref_type(parent, owner);
1416         if (insert) {
1417                 extra_size = btrfs_extent_inline_ref_size(want);
1418                 path->keep_locks = 1;
1419         } else
1420                 extra_size = -1;
1421         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1422         if (ret < 0) {
1423                 err = ret;
1424                 goto out;
1425         }
1426         BUG_ON(ret);
1427
1428         leaf = path->nodes[0];
1429         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1430 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1431         if (item_size < sizeof(*ei)) {
1432                 if (!insert) {
1433                         err = -ENOENT;
1434                         goto out;
1435                 }
1436                 ret = convert_extent_item_v0(trans, root, path, owner,
1437                                              extra_size);
1438                 if (ret < 0) {
1439                         err = ret;
1440                         goto out;
1441                 }
1442                 leaf = path->nodes[0];
1443                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1444         }
1445 #endif
1446         BUG_ON(item_size < sizeof(*ei));
1447
1448         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1449         flags = btrfs_extent_flags(leaf, ei);
1450
1451         ptr = (unsigned long)(ei + 1);
1452         end = (unsigned long)ei + item_size;
1453
1454         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1455                 ptr += sizeof(struct btrfs_tree_block_info);
1456                 BUG_ON(ptr > end);
1457         } else {
1458                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1459         }
1460
1461         err = -ENOENT;
1462         while (1) {
1463                 if (ptr >= end) {
1464                         WARN_ON(ptr > end);
1465                         break;
1466                 }
1467                 iref = (struct btrfs_extent_inline_ref *)ptr;
1468                 type = btrfs_extent_inline_ref_type(leaf, iref);
1469                 if (want < type)
1470                         break;
1471                 if (want > type) {
1472                         ptr += btrfs_extent_inline_ref_size(type);
1473                         continue;
1474                 }
1475
1476                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1477                         struct btrfs_extent_data_ref *dref;
1478                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1479                         if (match_extent_data_ref(leaf, dref, root_objectid,
1480                                                   owner, offset)) {
1481                                 err = 0;
1482                                 break;
1483                         }
1484                         if (hash_extent_data_ref_item(leaf, dref) <
1485                             hash_extent_data_ref(root_objectid, owner, offset))
1486                                 break;
1487                 } else {
1488                         u64 ref_offset;
1489                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1490                         if (parent > 0) {
1491                                 if (parent == ref_offset) {
1492                                         err = 0;
1493                                         break;
1494                                 }
1495                                 if (ref_offset < parent)
1496                                         break;
1497                         } else {
1498                                 if (root_objectid == ref_offset) {
1499                                         err = 0;
1500                                         break;
1501                                 }
1502                                 if (ref_offset < root_objectid)
1503                                         break;
1504                         }
1505                 }
1506                 ptr += btrfs_extent_inline_ref_size(type);
1507         }
1508         if (err == -ENOENT && insert) {
1509                 if (item_size + extra_size >=
1510                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1511                         err = -EAGAIN;
1512                         goto out;
1513                 }
1514                 /*
1515                  * To add a new inline back ref, we have to make sure
1516                  * there is no corresponding back ref item.
1517                  * For simplicity, we just do not add a new inline back
1518                  * ref if there is any kind of item for this block.
1519                  */
1520                 if (find_next_key(path, 0, &key) == 0 &&
1521                     key.objectid == bytenr &&
1522                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1523                         err = -EAGAIN;
1524                         goto out;
1525                 }
1526         }
1527         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1528 out:
1529         if (insert) {
1530                 path->keep_locks = 0;
1531                 btrfs_unlock_up_safe(path, 1);
1532         }
1533         return err;
1534 }
1535
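     /*
      * Usage sketch (editor's illustration, assuming the caller already
      * holds a transaction handle and an allocated path; all variable
      * names are placeholders):
      *
      *     struct btrfs_extent_inline_ref *iref;
      *     int ret;
      *
      *     ret = lookup_inline_extent_backref(trans, extent_root, path,
      *                                        &iref, bytenr, num_bytes,
      *                                        parent, root_objectid,
      *                                        owner, offset, 0);
      *     if (ret == 0)
      *             update_inline_extent_backref(trans, extent_root, path,
      *                                          iref, refs_to_mod, NULL);
      *
      * with insert == 0 the function never returns -EAGAIN, so the only
      * other outcomes are -ENOENT (no inline ref) or a search error.
      */
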
1536 /*
1537  * helper to add a new inline back ref
1538  */
1539 static noinline_for_stack
1540 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1541                                 struct btrfs_root *root,
1542                                 struct btrfs_path *path,
1543                                 struct btrfs_extent_inline_ref *iref,
1544                                 u64 parent, u64 root_objectid,
1545                                 u64 owner, u64 offset, int refs_to_add,
1546                                 struct btrfs_delayed_extent_op *extent_op)
1547 {
1548         struct extent_buffer *leaf;
1549         struct btrfs_extent_item *ei;
1550         unsigned long ptr;
1551         unsigned long end;
1552         unsigned long item_offset;
1553         u64 refs;
1554         int size;
1555         int type;
1556         int ret;
1557
1558         leaf = path->nodes[0];
1559         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1560         item_offset = (unsigned long)iref - (unsigned long)ei;
1561
1562         type = extent_ref_type(parent, owner);
1563         size = btrfs_extent_inline_ref_size(type);
1564
1565         ret = btrfs_extend_item(trans, root, path, size);
1566
1567         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1568         refs = btrfs_extent_refs(leaf, ei);
1569         refs += refs_to_add;
1570         btrfs_set_extent_refs(leaf, ei, refs);
1571         if (extent_op)
1572                 __run_delayed_extent_op(extent_op, leaf, ei);
1573
1574         ptr = (unsigned long)ei + item_offset;
1575         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1576         if (ptr < end - size)
1577                 memmove_extent_buffer(leaf, ptr + size, ptr,
1578                                       end - size - ptr);
1579
1580         iref = (struct btrfs_extent_inline_ref *)ptr;
1581         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1582         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1583                 struct btrfs_extent_data_ref *dref;
1584                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1585                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1586                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1587                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1588                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1589         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1590                 struct btrfs_shared_data_ref *sref;
1591                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1592                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1593                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1594         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1595                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1596         } else {
1597                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1598         }
1599         btrfs_mark_buffer_dirty(leaf);
1600         return 0;
1601 }
1602
1603 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1604                                  struct btrfs_root *root,
1605                                  struct btrfs_path *path,
1606                                  struct btrfs_extent_inline_ref **ref_ret,
1607                                  u64 bytenr, u64 num_bytes, u64 parent,
1608                                  u64 root_objectid, u64 owner, u64 offset)
1609 {
1610         int ret;
1611
1612         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1613                                            bytenr, num_bytes, parent,
1614                                            root_objectid, owner, offset, 0);
1615         if (ret != -ENOENT)
1616                 return ret;
1617
1618         btrfs_release_path(path);
1619         *ref_ret = NULL;
1620
1621         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1622                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1623                                             root_objectid);
1624         } else {
1625                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1626                                              root_objectid, owner, offset);
1627         }
1628         return ret;
1629 }
1630
1631 /*
1632  * helper to update/remove an inline back ref
1633  */
1634 static noinline_for_stack
1635 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1636                                  struct btrfs_root *root,
1637                                  struct btrfs_path *path,
1638                                  struct btrfs_extent_inline_ref *iref,
1639                                  int refs_to_mod,
1640                                  struct btrfs_delayed_extent_op *extent_op)
1641 {
1642         struct extent_buffer *leaf;
1643         struct btrfs_extent_item *ei;
1644         struct btrfs_extent_data_ref *dref = NULL;
1645         struct btrfs_shared_data_ref *sref = NULL;
1646         unsigned long ptr;
1647         unsigned long end;
1648         u32 item_size;
1649         int size;
1650         int type;
1651         int ret;
1652         u64 refs;
1653
1654         leaf = path->nodes[0];
1655         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1656         refs = btrfs_extent_refs(leaf, ei);
1657         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1658         refs += refs_to_mod;
1659         btrfs_set_extent_refs(leaf, ei, refs);
1660         if (extent_op)
1661                 __run_delayed_extent_op(extent_op, leaf, ei);
1662
1663         type = btrfs_extent_inline_ref_type(leaf, iref);
1664
1665         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1666                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1667                 refs = btrfs_extent_data_ref_count(leaf, dref);
1668         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1669                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1670                 refs = btrfs_shared_data_ref_count(leaf, sref);
1671         } else {
1672                 refs = 1;
1673                 BUG_ON(refs_to_mod != -1);
1674         }
1675
1676         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1677         refs += refs_to_mod;
1678
1679         if (refs > 0) {
1680                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1681                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1682                 else
1683                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1684         } else {
1685                 size =  btrfs_extent_inline_ref_size(type);
1686                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1687                 ptr = (unsigned long)iref;
1688                 end = (unsigned long)ei + item_size;
1689                 if (ptr + size < end)
1690                         memmove_extent_buffer(leaf, ptr, ptr + size,
1691                                               end - ptr - size);
1692                 item_size -= size;
1693                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1694         }
1695         btrfs_mark_buffer_dirty(leaf);
1696         return 0;
1697 }
1698
1699 static noinline_for_stack
1700 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1701                                  struct btrfs_root *root,
1702                                  struct btrfs_path *path,
1703                                  u64 bytenr, u64 num_bytes, u64 parent,
1704                                  u64 root_objectid, u64 owner,
1705                                  u64 offset, int refs_to_add,
1706                                  struct btrfs_delayed_extent_op *extent_op)
1707 {
1708         struct btrfs_extent_inline_ref *iref;
1709         int ret;
1710
1711         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1712                                            bytenr, num_bytes, parent,
1713                                            root_objectid, owner, offset, 1);
1714         if (ret == 0) {
1715                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1716                 ret = update_inline_extent_backref(trans, root, path, iref,
1717                                                    refs_to_add, extent_op);
1718         } else if (ret == -ENOENT) {
1719                 ret = setup_inline_extent_backref(trans, root, path, iref,
1720                                                   parent, root_objectid,
1721                                                   owner, offset, refs_to_add,
1722                                                   extent_op);
1723         }
1724         return ret;
1725 }
1726
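     /*
      * Editor's note: the three outcomes of the lookup above map directly
      * onto the callers' expectations -- 0 means an existing inline ref
      * was updated, -ENOENT means a new inline ref was spliced in (and 0
      * is returned), and -EAGAIN propagates so that callers such as
      * __btrfs_inc_extent_ref() fall back to a separate keyed back ref
      * item.
      */
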
1727 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1728                                  struct btrfs_root *root,
1729                                  struct btrfs_path *path,
1730                                  u64 bytenr, u64 parent, u64 root_objectid,
1731                                  u64 owner, u64 offset, int refs_to_add)
1732 {
1733         int ret;
1734         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1735                 BUG_ON(refs_to_add != 1);
1736                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1737                                             parent, root_objectid);
1738         } else {
1739                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1740                                              parent, root_objectid,
1741                                              owner, offset, refs_to_add);
1742         }
1743         return ret;
1744 }
1745
1746 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1747                                  struct btrfs_root *root,
1748                                  struct btrfs_path *path,
1749                                  struct btrfs_extent_inline_ref *iref,
1750                                  int refs_to_drop, int is_data)
1751 {
1752         int ret;
1753
1754         BUG_ON(!is_data && refs_to_drop != 1);
1755         if (iref) {
1756                 ret = update_inline_extent_backref(trans, root, path, iref,
1757                                                    -refs_to_drop, NULL);
1758         } else if (is_data) {
1759                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1760         } else {
1761                 ret = btrfs_del_item(trans, root, path);
1762         }
1763         return ret;
1764 }
1765
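     /*
      * Editor's note on the three cases above: an inline ref is shrunk or
      * dropped in place by update_inline_extent_backref(); a keyed
      * EXTENT_DATA_REF item can carry more than one count and so goes
      * through remove_extent_data_ref(); any other keyed back ref item
      * represents exactly one reference and is deleted outright.
      */
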
1766 static int btrfs_issue_discard(struct block_device *bdev,
1767                                 u64 start, u64 len)
1768 {
1769         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1770 }
1771
1772 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1773                                 u64 num_bytes, u64 *actual_bytes)
1774 {
1775         int ret;
1776         u64 discarded_bytes = 0;
1777         struct btrfs_multi_bio *multi = NULL;
1778
1780         /* Tell the block device(s) that the sectors can be discarded */
1781         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1782                               bytenr, &num_bytes, &multi, 0);
1783         if (!ret) {
1784                 struct btrfs_bio_stripe *stripe = multi->stripes;
1785                 int i;
1786
1788                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1789                         ret = btrfs_issue_discard(stripe->dev->bdev,
1790                                                   stripe->physical,
1791                                                   stripe->length);
1792                         if (!ret)
1793                                 discarded_bytes += stripe->length;
1794                         else if (ret != -EOPNOTSUPP)
1795                                 break;
1796                 }
1797                 kfree(multi);
1798         }
1799         if (discarded_bytes && ret == -EOPNOTSUPP)
1800                 ret = 0;
1801
1802         if (actual_bytes)
1803                 *actual_bytes = discarded_bytes;
1804
1806         return ret;
1807 }
1808
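     /*
      * Usage sketch (editor's illustration; 'start', 'len' and 'trimmed'
      * are placeholders):
      *
      *     u64 trimmed = 0;
      *     int ret;
      *
      *     ret = btrfs_discard_extent(root, start, len, &trimmed);
      *     if (!ret)
      *             printk(KERN_INFO "btrfs: discarded %llu bytes\n",
      *                    (unsigned long long)trimmed);
      *
      * note that btrfs_discard_extent() reports success as long as at
      * least one stripe was trimmed, even if others returned -EOPNOTSUPP.
      */
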
1809 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1810                          struct btrfs_root *root,
1811                          u64 bytenr, u64 num_bytes, u64 parent,
1812                          u64 root_objectid, u64 owner, u64 offset)
1813 {
1814         int ret;
1815         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1816                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1817
1818         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1819                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1820                                         parent, root_objectid, (int)owner,
1821                                         BTRFS_ADD_DELAYED_REF, NULL);
1822         } else {
1823                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1824                                         parent, root_objectid, owner, offset,
1825                                         BTRFS_ADD_DELAYED_REF, NULL);
1826         }
1827         return ret;
1828 }
1829
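     /*
      * Call sketch (editor's illustration): taking one more reference on
      * a data extent that is not shared, on behalf of inode 'ino' at file
      * offset 'off' (both placeholders):
      *
      *     ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
      *                                root->root_key.objectid, ino, off);
      *
      * tree blocks instead pass owner == level, which is below
      * BTRFS_FIRST_FREE_OBJECTID and routes the call to
      * btrfs_add_delayed_tree_ref().
      */
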
1830 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1831                                   struct btrfs_root *root,
1832                                   u64 bytenr, u64 num_bytes,
1833                                   u64 parent, u64 root_objectid,
1834                                   u64 owner, u64 offset, int refs_to_add,
1835                                   struct btrfs_delayed_extent_op *extent_op)
1836 {
1837         struct btrfs_path *path;
1838         struct extent_buffer *leaf;
1839         struct btrfs_extent_item *item;
1840         u64 refs;
1841         int ret;
1842         int err = 0;
1843
1844         path = btrfs_alloc_path();
1845         if (!path)
1846                 return -ENOMEM;
1847
1848         path->reada = 1;
1849         path->leave_spinning = 1;
1850         /* this will set up the path even if it fails to insert the back ref */
1851         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1852                                            path, bytenr, num_bytes, parent,
1853                                            root_objectid, owner, offset,
1854                                            refs_to_add, extent_op);
1855         if (ret == 0)
1856                 goto out;
1857
1858         if (ret != -EAGAIN) {
1859                 err = ret;
1860                 goto out;
1861         }
1862
1863         leaf = path->nodes[0];
1864         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1865         refs = btrfs_extent_refs(leaf, item);
1866         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1867         if (extent_op)
1868                 __run_delayed_extent_op(extent_op, leaf, item);
1869
1870         btrfs_mark_buffer_dirty(leaf);
1871         btrfs_release_path(path);
1872
1873         path->reada = 1;
1874         path->leave_spinning = 1;
1875
1876         /* now insert the actual backref */
1877         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1878                                     path, bytenr, parent, root_objectid,
1879                                     owner, offset, refs_to_add);
1880         BUG_ON(ret);
1881 out:
1882         btrfs_free_path(path);
1883         return err;
1884 }
1885
1886 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1887                                 struct btrfs_root *root,
1888                                 struct btrfs_delayed_ref_node *node,
1889                                 struct btrfs_delayed_extent_op *extent_op,
1890                                 int insert_reserved)
1891 {
1892         int ret = 0;
1893         struct btrfs_delayed_data_ref *ref;
1894         struct btrfs_key ins;
1895         u64 parent = 0;
1896         u64 ref_root = 0;
1897         u64 flags = 0;
1898
1899         ins.objectid = node->bytenr;
1900         ins.offset = node->num_bytes;
1901         ins.type = BTRFS_EXTENT_ITEM_KEY;
1902
1903         ref = btrfs_delayed_node_to_data_ref(node);
1904         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1905                 parent = ref->parent;
1906         else
1907                 ref_root = ref->root;
1908
1909         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1910                 if (extent_op) {
1911                         BUG_ON(extent_op->update_key);
1912                         flags |= extent_op->flags_to_set;
1913                 }
1914                 ret = alloc_reserved_file_extent(trans, root,
1915                                                  parent, ref_root, flags,
1916                                                  ref->objectid, ref->offset,
1917                                                  &ins, node->ref_mod);
1918         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1919                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1920                                              node->num_bytes, parent,
1921                                              ref_root, ref->objectid,
1922                                              ref->offset, node->ref_mod,
1923                                              extent_op);
1924         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1925                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1926                                           node->num_bytes, parent,
1927                                           ref_root, ref->objectid,
1928                                           ref->offset, node->ref_mod,
1929                                           extent_op);
1930         } else {
1931                 BUG();
1932         }
1933         return ret;
1934 }
1935
1936 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1937                                     struct extent_buffer *leaf,
1938                                     struct btrfs_extent_item *ei)
1939 {
1940         u64 flags = btrfs_extent_flags(leaf, ei);
1941         if (extent_op->update_flags) {
1942                 flags |= extent_op->flags_to_set;
1943                 btrfs_set_extent_flags(leaf, ei, flags);
1944         }
1945
1946         if (extent_op->update_key) {
1947                 struct btrfs_tree_block_info *bi;
1948                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1949                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1950                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1951         }
1952 }
1953
1954 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1955                                  struct btrfs_root *root,
1956                                  struct btrfs_delayed_ref_node *node,
1957                                  struct btrfs_delayed_extent_op *extent_op)
1958 {
1959         struct btrfs_key key;
1960         struct btrfs_path *path;
1961         struct btrfs_extent_item *ei;
1962         struct extent_buffer *leaf;
1963         u32 item_size;
1964         int ret;
1965         int err = 0;
1966
1967         path = btrfs_alloc_path();
1968         if (!path)
1969                 return -ENOMEM;
1970
1971         key.objectid = node->bytenr;
1972         key.type = BTRFS_EXTENT_ITEM_KEY;
1973         key.offset = node->num_bytes;
1974
1975         path->reada = 1;
1976         path->leave_spinning = 1;
1977         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1978                                 path, 0, 1);
1979         if (ret < 0) {
1980                 err = ret;
1981                 goto out;
1982         }
1983         if (ret > 0) {
1984                 err = -EIO;
1985                 goto out;
1986         }
1987
1988         leaf = path->nodes[0];
1989         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1990 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1991         if (item_size < sizeof(*ei)) {
1992                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1993                                              path, (u64)-1, 0);
1994                 if (ret < 0) {
1995                         err = ret;
1996                         goto out;
1997                 }
1998                 leaf = path->nodes[0];
1999                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2000         }
2001 #endif
2002         BUG_ON(item_size < sizeof(*ei));
2003         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2004         __run_delayed_extent_op(extent_op, leaf, ei);
2005
2006         btrfs_mark_buffer_dirty(leaf);
2007 out:
2008         btrfs_free_path(path);
2009         return err;
2010 }
2011
2012 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2013                                 struct btrfs_root *root,
2014                                 struct btrfs_delayed_ref_node *node,
2015                                 struct btrfs_delayed_extent_op *extent_op,
2016                                 int insert_reserved)
2017 {
2018         int ret = 0;
2019         struct btrfs_delayed_tree_ref *ref;
2020         struct btrfs_key ins;
2021         u64 parent = 0;
2022         u64 ref_root = 0;
2023
2024         ins.objectid = node->bytenr;
2025         ins.offset = node->num_bytes;
2026         ins.type = BTRFS_EXTENT_ITEM_KEY;
2027
2028         ref = btrfs_delayed_node_to_tree_ref(node);
2029         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2030                 parent = ref->parent;
2031         else
2032                 ref_root = ref->root;
2033
2034         BUG_ON(node->ref_mod != 1);
2035         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2036                 BUG_ON(!extent_op || !extent_op->update_flags ||
2037                        !extent_op->update_key);
2038                 ret = alloc_reserved_tree_block(trans, root,
2039                                                 parent, ref_root,
2040                                                 extent_op->flags_to_set,
2041                                                 &extent_op->key,
2042                                                 ref->level, &ins);
2043         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2044                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2045                                              node->num_bytes, parent, ref_root,
2046                                              ref->level, 0, 1, extent_op);
2047         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2048                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2049                                           node->num_bytes, parent, ref_root,
2050                                           ref->level, 0, 1, extent_op);
2051         } else {
2052                 BUG();
2053         }
2054         return ret;
2055 }
2056
2057 /* helper function to actually process a single delayed ref entry */
2058 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2059                                struct btrfs_root *root,
2060                                struct btrfs_delayed_ref_node *node,
2061                                struct btrfs_delayed_extent_op *extent_op,
2062                                int insert_reserved)
2063 {
2064         int ret;
2065         if (btrfs_delayed_ref_is_head(node)) {
2066                 struct btrfs_delayed_ref_head *head;
2067                 /*
2068                  * we've hit the end of the chain and we were supposed
2069                  * to insert this extent into the tree.  But it got
2070                  * deleted before we ever needed to insert it, so all
2071                  * we have to do is clean up the accounting
2072                  */
2073                 BUG_ON(extent_op);
2074                 head = btrfs_delayed_node_to_head(node);
2075                 if (insert_reserved) {
2076                         btrfs_pin_extent(root, node->bytenr,
2077                                          node->num_bytes, 1);
2078                         if (head->is_data) {
2079                                 ret = btrfs_del_csums(trans, root,
2080                                                       node->bytenr,
2081                                                       node->num_bytes);
2082                                 BUG_ON(ret);
2083                         }
2084                 }
2085                 mutex_unlock(&head->mutex);
2086                 return 0;
2087         }
2088
2089         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2090             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2091                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2092                                            insert_reserved);
2093         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2094                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2095                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2096                                            insert_reserved);
2097         else
2098                 BUG();
2099         return ret;
2100 }
2101
2102 static noinline struct btrfs_delayed_ref_node *
2103 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2104 {
2105         struct rb_node *node;
2106         struct btrfs_delayed_ref_node *ref;
2107         int action = BTRFS_ADD_DELAYED_REF;
2108 again:
2109         /*
2110          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2111          * this prevents the ref count from going down to zero while
2112          * there are still pending delayed refs.
2113          */
2114         node = rb_prev(&head->node.rb_node);
2115         while (1) {
2116                 if (!node)
2117                         break;
2118                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2119                                 rb_node);
2120                 if (ref->bytenr != head->node.bytenr)
2121                         break;
2122                 if (ref->action == action)
2123                         return ref;
2124                 node = rb_prev(node);
2125         }
2126         if (action == BTRFS_ADD_DELAYED_REF) {
2127                 action = BTRFS_DROP_DELAYED_REF;
2128                 goto again;
2129         }
2130         return NULL;
2131 }
2132
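     /*
      * Worked example (editor's note): with delayed refs (+1, -1, +1)
      * queued against one extent, applying the -1 first could take the
      * ref count to zero and free the extent while a +1 is still pending.
      * Handing out every BTRFS_ADD_DELAYED_REF node before any drop keeps
      * the count positive until only drops remain.
      */
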
2133 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2134                                        struct btrfs_root *root,
2135                                        struct list_head *cluster)
2136 {
2137         struct btrfs_delayed_ref_root *delayed_refs;
2138         struct btrfs_delayed_ref_node *ref;
2139         struct btrfs_delayed_ref_head *locked_ref = NULL;
2140         struct btrfs_delayed_extent_op *extent_op;
2141         int ret;
2142         int count = 0;
2143         int must_insert_reserved = 0;
2144
2145         delayed_refs = &trans->transaction->delayed_refs;
2146         while (1) {
2147                 if (!locked_ref) {
2148                         /* pick a new head ref from the cluster list */
2149                         if (list_empty(cluster))
2150                                 break;
2151
2152                         locked_ref = list_entry(cluster->next,
2153                                      struct btrfs_delayed_ref_head, cluster);
2154
2155                         /* grab the lock that says we are going to process
2156                          * all the refs for this head */
2157                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2158
2159                         /*
2160                          * we may have dropped the spin lock to get the head
2161                          * mutex lock, and that might have given someone else
2162                          * time to free the head.  If that's true, it has been
2163                          * removed from our list and we can move on.
2164                          */
2165                         if (ret == -EAGAIN) {
2166                                 locked_ref = NULL;
2167                                 count++;
2168                                 continue;
2169                         }
2170                 }
2171
2172                 /*
2173                  * record the must insert reserved flag before we
2174                  * drop the spin lock.
2175                  */
2176                 must_insert_reserved = locked_ref->must_insert_reserved;
2177                 locked_ref->must_insert_reserved = 0;
2178
2179                 extent_op = locked_ref->extent_op;
2180                 locked_ref->extent_op = NULL;
2181
2182                 /*
2183                  * locked_ref is the head node, so we have to go one
2184                  * node back for any delayed ref updates
2185                  */
2186                 ref = select_delayed_ref(locked_ref);
2187                 if (!ref) {
2188                         /* All delayed refs have been processed; go ahead
2189                          * and send the head node to run_one_delayed_ref,
2190                          * so that any accounting fixes can happen
2191                          */
2192                         ref = &locked_ref->node;
2193
2194                         if (extent_op && must_insert_reserved) {
2195                                 kfree(extent_op);
2196                                 extent_op = NULL;
2197                         }
2198
2199                         if (extent_op) {
2200                                 spin_unlock(&delayed_refs->lock);
2201
2202                                 ret = run_delayed_extent_op(trans, root,
2203                                                             ref, extent_op);
2204                                 BUG_ON(ret);
2205                                 kfree(extent_op);
2206
2207                                 cond_resched();
2208                                 spin_lock(&delayed_refs->lock);
2209                                 continue;
2210                         }
2211
2212                         list_del_init(&locked_ref->cluster);
2213                         locked_ref = NULL;
2214                 }
2215
2216                 ref->in_tree = 0;
2217                 rb_erase(&ref->rb_node, &delayed_refs->root);
2218                 delayed_refs->num_entries--;
2219
2220                 spin_unlock(&delayed_refs->lock);
2221
2222                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2223                                           must_insert_reserved);
2224                 BUG_ON(ret);
2225
2226                 btrfs_put_delayed_ref(ref);
2227                 kfree(extent_op);
2228                 count++;
2229
2230                 cond_resched();
2231                 spin_lock(&delayed_refs->lock);
2232         }
2233         return count;
2234 }
2235
2236 /*
2237  * this starts processing the delayed reference count updates and
2238  * extent insertions we have queued up so far.  count can be
2239  * 0, which means to process everything in the tree at the start
2240  * of the run (but not newly added entries), or it can be some target
2241  * number you'd like to process.
2242  */
2243 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2244                            struct btrfs_root *root, unsigned long count)
2245 {
2246         struct rb_node *node;
2247         struct btrfs_delayed_ref_root *delayed_refs;
2248         struct btrfs_delayed_ref_node *ref;
2249         struct list_head cluster;
2250         int ret;
2251         int run_all = count == (unsigned long)-1;
2252         int run_most = 0;
2253
2254         if (root == root->fs_info->extent_root)
2255                 root = root->fs_info->tree_root;
2256
2257         delayed_refs = &trans->transaction->delayed_refs;
2258         INIT_LIST_HEAD(&cluster);
2259 again:
2260         spin_lock(&delayed_refs->lock);
2261         if (count == 0) {
2262                 count = delayed_refs->num_entries * 2;
2263                 run_most = 1;
2264         }
2265         while (1) {
2266                 if (!(run_all || run_most) &&
2267                     delayed_refs->num_heads_ready < 64)
2268                         break;
2269
2270                 /*
2271                  * go find something we can process in the rbtree.  We start at
2272                  * the beginning of the tree, and then build a cluster
2273                  * of refs to process starting at the first one we are able to
2274                  * lock
2275                  */
2276                 ret = btrfs_find_ref_cluster(trans, &cluster,
2277                                              delayed_refs->run_delayed_start);
2278                 if (ret)
2279                         break;
2280
2281                 ret = run_clustered_refs(trans, root, &cluster);
2282                 BUG_ON(ret < 0);
2283
2284                 count -= min_t(unsigned long, ret, count);
2285
2286                 if (count == 0)
2287                         break;
2288         }
2289
2290         if (run_all) {
2291                 node = rb_first(&delayed_refs->root);
2292                 if (!node)
2293                         goto out;
2294                 count = (unsigned long)-1;
2295
2296                 while (node) {
2297                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2298                                        rb_node);
2299                         if (btrfs_delayed_ref_is_head(ref)) {
2300                                 struct btrfs_delayed_ref_head *head;
2301
2302                                 head = btrfs_delayed_node_to_head(ref);
2303                                 atomic_inc(&ref->refs);
2304
2305                                 spin_unlock(&delayed_refs->lock);
2306                                 /*
2307                                  * Mutex was contended, block until it's
2308                                  * released and try again
2309                                  */
2310                                 mutex_lock(&head->mutex);
2311                                 mutex_unlock(&head->mutex);
2312
2313                                 btrfs_put_delayed_ref(ref);
2314                                 cond_resched();
2315                                 goto again;
2316                         }
2317                         node = rb_next(node);
2318                 }
2319                 spin_unlock(&delayed_refs->lock);
2320                 schedule_timeout(1);
2321                 goto again;
2322         }
2323 out:
2324         spin_unlock(&delayed_refs->lock);
2325         return 0;
2326 }
2327
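     /*
      * Usage sketch (editor's illustration): flush everything queued so
      * far, as the commit path does, or throttle a bounded batch:
      *
      *     ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
      *     ret = btrfs_run_delayed_refs(trans, root, 64);
      */
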
2328 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2329                                 struct btrfs_root *root,
2330                                 u64 bytenr, u64 num_bytes, u64 flags,
2331                                 int is_data)
2332 {
2333         struct btrfs_delayed_extent_op *extent_op;
2334         int ret;
2335
2336         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2337         if (!extent_op)
2338                 return -ENOMEM;
2339
2340         extent_op->flags_to_set = flags;
2341         extent_op->update_flags = 1;
2342         extent_op->update_key = 0;
2343         extent_op->is_data = is_data ? 1 : 0;
2344
2345         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2346         if (ret)
2347                 kfree(extent_op);
2348         return ret;
2349 }
2350
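     /*
      * Call sketch (editor's illustration, mirroring how the COW paths
      * switch a tree block to full back ref mode; 'buf' is a placeholder
      * extent buffer):
      *
      *     ret = btrfs_set_disk_extent_flags(trans, root, buf->start,
      *                                       buf->len,
      *                                       BTRFS_BLOCK_FLAG_FULL_BACKREF,
      *                                       0);
      */
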
2351 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2352                                       struct btrfs_root *root,
2353                                       struct btrfs_path *path,
2354                                       u64 objectid, u64 offset, u64 bytenr)
2355 {
2356         struct btrfs_delayed_ref_head *head;
2357         struct btrfs_delayed_ref_node *ref;
2358         struct btrfs_delayed_data_ref *data_ref;
2359         struct btrfs_delayed_ref_root *delayed_refs;
2360         struct rb_node *node;
2361         int ret = 0;
2362
2363         ret = -ENOENT;
2364         delayed_refs = &trans->transaction->delayed_refs;
2365         spin_lock(&delayed_refs->lock);
2366         head = btrfs_find_delayed_ref_head(trans, bytenr);
2367         if (!head)
2368                 goto out;
2369
2370         if (!mutex_trylock(&head->mutex)) {
2371                 atomic_inc(&head->node.refs);
2372                 spin_unlock(&delayed_refs->lock);
2373
2374                 btrfs_release_path(path);
2375
2376                 /*
2377                  * Mutex was contended, block until it's released and let
2378                  * caller try again
2379                  */
2380                 mutex_lock(&head->mutex);
2381                 mutex_unlock(&head->mutex);
2382                 btrfs_put_delayed_ref(&head->node);
2383                 return -EAGAIN;
2384         }
2385
2386         node = rb_prev(&head->node.rb_node);
2387         if (!node)
2388                 goto out_unlock;
2389
2390         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2391
2392         if (ref->bytenr != bytenr)
2393                 goto out_unlock;
2394
2395         ret = 1;
2396         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2397                 goto out_unlock;
2398
2399         data_ref = btrfs_delayed_node_to_data_ref(ref);
2400
2401         node = rb_prev(node);
2402         if (node) {
2403                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2404                 if (ref->bytenr == bytenr)
2405                         goto out_unlock;
2406         }
2407
2408         if (data_ref->root != root->root_key.objectid ||
2409             data_ref->objectid != objectid || data_ref->offset != offset)
2410                 goto out_unlock;
2411
2412         ret = 0;
2413 out_unlock:
2414         mutex_unlock(&head->mutex);
2415 out:
2416         spin_unlock(&delayed_refs->lock);
2417         return ret;
2418 }
2419
2420 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2421                                         struct btrfs_root *root,
2422                                         struct btrfs_path *path,
2423                                         u64 objectid, u64 offset, u64 bytenr)
2424 {
2425         struct btrfs_root *extent_root = root->fs_info->extent_root;
2426         struct extent_buffer *leaf;
2427         struct btrfs_extent_data_ref *ref;
2428         struct btrfs_extent_inline_ref *iref;
2429         struct btrfs_extent_item *ei;
2430         struct btrfs_key key;
2431         u32 item_size;
2432         int ret;
2433
2434         key.objectid = bytenr;
2435         key.offset = (u64)-1;
2436         key.type = BTRFS_EXTENT_ITEM_KEY;
2437
2438         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2439         if (ret < 0)
2440                 goto out;
2441         BUG_ON(ret == 0);
2442
2443         ret = -ENOENT;
2444         if (path->slots[0] == 0)
2445                 goto out;
2446
2447         path->slots[0]--;
2448         leaf = path->nodes[0];
2449         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2450
2451         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2452                 goto out;
2453
2454         ret = 1;
2455         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2456 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2457         if (item_size < sizeof(*ei)) {
2458                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2459                 goto out;
2460         }
2461 #endif
2462         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2463
2464         if (item_size != sizeof(*ei) +
2465             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2466                 goto out;
2467
2468         if (btrfs_extent_generation(leaf, ei) <=
2469             btrfs_root_last_snapshot(&root->root_item))
2470                 goto out;
2471
2472         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2473         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2474             BTRFS_EXTENT_DATA_REF_KEY)
2475                 goto out;
2476
2477         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2478         if (btrfs_extent_refs(leaf, ei) !=
2479             btrfs_extent_data_ref_count(leaf, ref) ||
2480             btrfs_extent_data_ref_root(leaf, ref) !=
2481             root->root_key.objectid ||
2482             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2483             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2484                 goto out;
2485
2486         ret = 0;
2487 out:
2488         return ret;
2489 }
2490
2491 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2492                           struct btrfs_root *root,
2493                           u64 objectid, u64 offset, u64 bytenr)
2494 {
2495         struct btrfs_path *path;
2496         int ret;
2497         int ret2;
2498
2499         path = btrfs_alloc_path();
2500         if (!path)
2501                 return -ENOMEM;
2502
2503         do {
2504                 ret = check_committed_ref(trans, root, path, objectid,
2505                                           offset, bytenr);
2506                 if (ret && ret != -ENOENT)
2507                         goto out;
2508
2509                 ret2 = check_delayed_ref(trans, root, path, objectid,
2510                                          offset, bytenr);
2511         } while (ret2 == -EAGAIN);
2512
2513         if (ret2 && ret2 != -ENOENT) {
2514                 ret = ret2;
2515                 goto out;
2516         }
2517
2518         if (ret != -ENOENT || ret2 != -ENOENT)
2519                 ret = 0;
2520 out:
2521         btrfs_free_path(path);
2522         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2523                 WARN_ON(ret > 0);
2524         return ret;
2525 }
2526
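     /*
      * Editor's note on the return convention: 0 means no cross reference
      * was found in either the committed extent tree or the delayed ref
      * queue, so the extent is only referenced by this (root, objectid,
      * offset) triple; any nonzero value (including errors) makes callers
      * assume a cross reference exists.  check_delayed_ref() returns
      * -EAGAIN when it had to block on the head mutex, which is why it
      * runs in a retry loop.
      */
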
2527 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2528                            struct btrfs_root *root,
2529                            struct extent_buffer *buf,
2530                            int full_backref, int inc)
2531 {
2532         u64 bytenr;
2533         u64 num_bytes;
2534         u64 parent;
2535         u64 ref_root;
2536         u32 nritems;
2537         struct btrfs_key key;
2538         struct btrfs_file_extent_item *fi;
2539         int i;
2540         int level;
2541         int ret = 0;
2542         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2543                             u64, u64, u64, u64, u64, u64);
2544
2545         ref_root = btrfs_header_owner(buf);
2546         nritems = btrfs_header_nritems(buf);
2547         level = btrfs_header_level(buf);
2548
2549         if (!root->ref_cows && level == 0)
2550                 return 0;
2551
2552         if (inc)
2553                 process_func = btrfs_inc_extent_ref;
2554         else
2555                 process_func = btrfs_free_extent;
2556
2557         if (full_backref)
2558                 parent = buf->start;
2559         else
2560                 parent = 0;
2561
2562         for (i = 0; i < nritems; i++) {
2563                 if (level == 0) {
2564                         btrfs_item_key_to_cpu(buf, &key, i);
2565                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2566                                 continue;
2567                         fi = btrfs_item_ptr(buf, i,
2568                                             struct btrfs_file_extent_item);
2569                         if (btrfs_file_extent_type(buf, fi) ==
2570                             BTRFS_FILE_EXTENT_INLINE)
2571                                 continue;
2572                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2573                         if (bytenr == 0)
2574                                 continue;
2575
2576                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2577                         key.offset -= btrfs_file_extent_offset(buf, fi);
2578                         ret = process_func(trans, root, bytenr, num_bytes,
2579                                            parent, ref_root, key.objectid,
2580                                            key.offset);
2581                         if (ret)
2582                                 goto fail;
2583                 } else {
2584                         bytenr = btrfs_node_blockptr(buf, i);
2585                         num_bytes = btrfs_level_size(root, level - 1);
2586                         ret = process_func(trans, root, bytenr, num_bytes,
2587                                            parent, ref_root, level - 1, 0);
2588                         if (ret)
2589                                 goto fail;
2590                 }
2591         }
2592         return 0;
2593 fail:
2594         BUG();
2595         return ret;
2596 }
2597
2598 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2599                   struct extent_buffer *buf, int full_backref)
2600 {
2601         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2602 }
2603
2604 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2605                   struct extent_buffer *buf, int full_backref)
2606 {
2607         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2608 }
2609
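     /*
      * Illustrative pairing (editor's note): when a tree block is COWed,
      * the new copy takes a reference on everything the old block points
      * to and the old copy drops its references, e.g.:
      *
      *     btrfs_inc_ref(trans, root, cow, 0);
      *     btrfs_dec_ref(trans, root, buf, 0);
      *
      * full_backref selects whether the child refs are recorded against
      * the block itself (parent = buf->start) or against the owning root.
      */
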
2610 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2611                                  struct btrfs_root *root,
2612                                  struct btrfs_path *path,
2613                                  struct btrfs_block_group_cache *cache)
2614 {
2615         int ret;
2616         struct btrfs_root *extent_root = root->fs_info->extent_root;
2617         unsigned long bi;
2618         struct extent_buffer *leaf;
2619
2620         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2621         if (ret < 0)
2622                 goto fail;
2623         BUG_ON(ret);
2624
2625         leaf = path->nodes[0];
2626         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2627         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2628         btrfs_mark_buffer_dirty(leaf);
2629         btrfs_release_path(path);
2630 fail:
2631         if (ret)
2632                 return ret;
2633         return 0;
2635 }
2636
2637 static struct btrfs_block_group_cache *
2638 next_block_group(struct btrfs_root *root,
2639                  struct btrfs_block_group_cache *cache)
2640 {
2641         struct rb_node *node;
2642         spin_lock(&root->fs_info->block_group_cache_lock);
2643         node = rb_next(&cache->cache_node);
2644         btrfs_put_block_group(cache);
2645         if (node) {
2646                 cache = rb_entry(node, struct btrfs_block_group_cache,
2647                                  cache_node);
2648                 btrfs_get_block_group(cache);
2649         } else
2650                 cache = NULL;
2651         spin_unlock(&root->fs_info->block_group_cache_lock);
2652         return cache;
2653 }
2654
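     /*
      * Iteration sketch (editor's illustration), following the pattern
      * btrfs_write_dirty_block_groups() uses below:
      *
      *     cache = btrfs_lookup_first_block_group(root->fs_info, 0);
      *     while (cache) {
      *             ... inspect cache ...
      *             cache = next_block_group(root, cache);
      *     }
      *
      * next_block_group() drops the caller's reference on 'cache' and
      * returns the next group with a reference held, or NULL at the end.
      */
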
2655 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2656                             struct btrfs_trans_handle *trans,
2657                             struct btrfs_path *path)
2658 {
2659         struct btrfs_root *root = block_group->fs_info->tree_root;
2660         struct inode *inode = NULL;
2661         u64 alloc_hint = 0;
2662         int dcs = BTRFS_DC_ERROR;
2663         int num_pages = 0;
2664         int retries = 0;
2665         int ret = 0;
2666
2667         /*
2668          * If this block group is smaller than 100 megs, don't bother
2669          * caching the block group.
2670          */
2671         if (block_group->key.offset < (100 * 1024 * 1024)) {
2672                 spin_lock(&block_group->lock);
2673                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2674                 spin_unlock(&block_group->lock);
2675                 return 0;
2676         }
2677
2678 again:
2679         inode = lookup_free_space_inode(root, block_group, path);
2680         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2681                 ret = PTR_ERR(inode);
2682                 btrfs_release_path(path);
2683                 goto out;
2684         }
2685
2686         if (IS_ERR(inode)) {
2687                 BUG_ON(retries);
2688                 retries++;
2689
2690                 if (block_group->ro)
2691                         goto out_free;
2692
2693                 ret = create_free_space_inode(root, trans, block_group, path);
2694                 if (ret)
2695                         goto out_free;
2696                 goto again;
2697         }
2698
2699         /*
2700          * We want to set the generation to 0, that way if anything goes wrong
2701          * from here on out we know not to trust this cache when we load up next
2702          * time.
2703          */
2704         BTRFS_I(inode)->generation = 0;
2705         ret = btrfs_update_inode(trans, root, inode);
2706         WARN_ON(ret);
2707
2708         if (i_size_read(inode) > 0) {
2709                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2710                                                       inode);
2711                 if (ret)
2712                         goto out_put;
2713         }
2714
2715         spin_lock(&block_group->lock);
2716         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2717                 /* We're not cached, don't bother trying to write stuff out */
2718                 dcs = BTRFS_DC_WRITTEN;
2719                 spin_unlock(&block_group->lock);
2720                 goto out_put;
2721         }
2722         spin_unlock(&block_group->lock);
2723
2724         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2725         if (!num_pages)
2726                 num_pages = 1;
2727
2728         /*
2729          * Just to make absolutely sure we have enough space, we're going to
2730          * preallocate 16 pages worth of space for each block group.  In
2731          * practice we ought to use at most 8, but we need extra space so we can
2732          * add our header and have a terminator between the extents and the
2733          * bitmaps.
2734          */
2735         num_pages *= 16;
2736         num_pages *= PAGE_CACHE_SIZE;
2737
2738         ret = btrfs_check_data_free_space(inode, num_pages);
2739         if (ret)
2740                 goto out_put;
2741
2742         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2743                                               num_pages, num_pages,
2744                                               &alloc_hint);
2745         if (!ret)
2746                 dcs = BTRFS_DC_SETUP;
2747         btrfs_free_reserved_data_space(inode, num_pages);
2748 out_put:
2749         iput(inode);
2750 out_free:
2751         btrfs_release_path(path);
2752 out:
2753         spin_lock(&block_group->lock);
2754         block_group->disk_cache_state = dcs;
2755         spin_unlock(&block_group->lock);
2756
2757         return ret;
2758 }
2759
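     /*
      * Write out all dirty block group items and their free space caches.
      * This makes three passes: first set up the cache files for any group
      * still in BTRFS_DC_CLEAR, then write the dirty block group items
      * themselves, and finally write out the caches that were marked
      * BTRFS_DC_NEED_WRITE.  If a new group shows up mid-walk, start over.
      */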
2760 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2761                                    struct btrfs_root *root)
2762 {
2763         struct btrfs_block_group_cache *cache;
2764         int err = 0;
2765         struct btrfs_path *path;
2766         u64 last = 0;
2767
2768         path = btrfs_alloc_path();
2769         if (!path)
2770                 return -ENOMEM;
2771
2772 again:
2773         while (1) {
2774                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2775                 while (cache) {
2776                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2777                                 break;
2778                         cache = next_block_group(root, cache);
2779                 }
2780                 if (!cache) {
2781                         if (last == 0)
2782                                 break;
2783                         last = 0;
2784                         continue;
2785                 }
2786                 err = cache_save_setup(cache, trans, path);
2787                 last = cache->key.objectid + cache->key.offset;
2788                 btrfs_put_block_group(cache);
2789         }
2790
2791         while (1) {
2792                 if (last == 0) {
2793                         err = btrfs_run_delayed_refs(trans, root,
2794                                                      (unsigned long)-1);
2795                         BUG_ON(err);
2796                 }
2797
2798                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2799                 while (cache) {
2800                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2801                                 btrfs_put_block_group(cache);
2802                                 goto again;
2803                         }
2804
2805                         if (cache->dirty)
2806                                 break;
2807                         cache = next_block_group(root, cache);
2808                 }
2809                 if (!cache) {
2810                         if (last == 0)
2811                                 break;
2812                         last = 0;
2813                         continue;
2814                 }
2815
2816                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2817                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2818                 cache->dirty = 0;
2819                 last = cache->key.objectid + cache->key.offset;
2820
2821                 err = write_one_cache_group(trans, root, path, cache);
2822                 BUG_ON(err);
2823                 btrfs_put_block_group(cache);
2824         }
2825
2826         while (1) {
2827                 /*
2828                  * This shouldn't be needed since we're just marking our
2829                  * preallocated extent as written, but running the delayed
2830                  * refs again can't hurt.
2831                  */
2832                 if (last == 0) {
2833                         err = btrfs_run_delayed_refs(trans, root,
2834                                                      (unsigned long)-1);
2835                         BUG_ON(err);
2836                 }
2837
2838                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2839                 while (cache) {
2840                         /*
2841                          * Really this shouldn't happen, but it could if we
2842                          * couldn't write the entire preallocated extent and
2843                          * splitting the extent resulted in a new block.
2844                          */
2845                         if (cache->dirty) {
2846                                 btrfs_put_block_group(cache);
2847                                 goto again;
2848                         }
2849                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2850                                 break;
2851                         cache = next_block_group(root, cache);
2852                 }
2853                 if (!cache) {
2854                         if (last == 0)
2855                                 break;
2856                         last = 0;
2857                         continue;
2858                 }
2859
2860                 btrfs_write_out_cache(root, trans, cache, path);
2861
2862                 /*
2863                  * If we didn't have an error then the cache state is still
2864                  * NEED_WRITE, so we can set it to WRITTEN.
2865                  */
2866                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2867                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
2868                 last = cache->key.objectid + cache->key.offset;
2869                 btrfs_put_block_group(cache);
2870         }
2871
2872         btrfs_free_path(path);
2873         return 0;
2874 }
2875
2876 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2877 {
2878         struct btrfs_block_group_cache *block_group;
2879         int readonly = 0;
2880
2881         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2882         if (!block_group || block_group->ro)
2883                 readonly = 1;
2884         if (block_group)
2885                 btrfs_put_block_group(block_group);
2886         return readonly;
2887 }
2888
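     /*
      * Add total_bytes and bytes_used to the space_info matching flags,
      * creating it if it doesn't exist yet.  The disk_* counters track raw
      * disk usage, so profiles that store two copies of everything (DUP,
      * RAID1, RAID10) count with a factor of two.
      */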
2889 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2890                              u64 total_bytes, u64 bytes_used,
2891                              struct btrfs_space_info **space_info)
2892 {
2893         struct btrfs_space_info *found;
2894         int i;
2895         int factor;
2896
2897         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2898                      BTRFS_BLOCK_GROUP_RAID10))
2899                 factor = 2;
2900         else
2901                 factor = 1;
2902
2903         found = __find_space_info(info, flags);
2904         if (found) {
2905                 spin_lock(&found->lock);
2906                 found->total_bytes += total_bytes;
2907                 found->disk_total += total_bytes * factor;
2908                 found->bytes_used += bytes_used;
2909                 found->disk_used += bytes_used * factor;
2910                 found->full = 0;
2911                 spin_unlock(&found->lock);
2912                 *space_info = found;
2913                 return 0;
2914         }
2915         found = kzalloc(sizeof(*found), GFP_NOFS);
2916         if (!found)
2917                 return -ENOMEM;
2918
2919         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2920                 INIT_LIST_HEAD(&found->block_groups[i]);
2921         init_rwsem(&found->groups_sem);
2922         spin_lock_init(&found->lock);
2923         found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2924                                 BTRFS_BLOCK_GROUP_SYSTEM |
2925                                 BTRFS_BLOCK_GROUP_METADATA);
2926         found->total_bytes = total_bytes;
2927         found->disk_total = total_bytes * factor;
2928         found->bytes_used = bytes_used;
2929         found->disk_used = bytes_used * factor;
2930         found->bytes_pinned = 0;
2931         found->bytes_reserved = 0;
2932         found->bytes_readonly = 0;
2933         found->bytes_may_use = 0;
2934         found->full = 0;
2935         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
2936         found->chunk_alloc = 0;
2937         *space_info = found;
2938         list_add_rcu(&found->list, &info->space_info);
2939         atomic_set(&found->caching_threads, 0);
2940         return 0;
2941 }
2942
2943 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2944 {
2945         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2946                                    BTRFS_BLOCK_GROUP_RAID1 |
2947                                    BTRFS_BLOCK_GROUP_RAID10 |
2948                                    BTRFS_BLOCK_GROUP_DUP);
2949         if (extra_flags) {
2950                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2951                         fs_info->avail_data_alloc_bits |= extra_flags;
2952                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2953                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2954                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2955                         fs_info->avail_system_alloc_bits |= extra_flags;
2956         }
2957 }
2958
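     /*
      * Mask out any profile bits that can't be satisfied with the current
      * number of devices (counting missing devices on a degraded FS) and
      * resolve conflicting bits so at most one redundancy profile remains.
      */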
2959 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2960 {
2961         /*
2962          * we add in the count of missing devices because we want
2963          * to make sure that any RAID levels on a degraded FS
2964          * continue to be honored.
2965          */
2966         u64 num_devices = root->fs_info->fs_devices->rw_devices +
2967                 root->fs_info->fs_devices->missing_devices;
2968
2969         if (num_devices == 1)
2970                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2971         if (num_devices < 4)
2972                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2973
2974         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2975             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2976                       BTRFS_BLOCK_GROUP_RAID10))) {
2977                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2978         }
2979
2980         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2981             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2982                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2983         }
2984
2985         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2986             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2987              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2988              (flags & BTRFS_BLOCK_GROUP_DUP)))
2989                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2990         return flags;
2991 }
2992
2993 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
2994 {
2995         if (flags & BTRFS_BLOCK_GROUP_DATA)
2996                 flags |= root->fs_info->avail_data_alloc_bits &
2997                          root->fs_info->data_alloc_profile;
2998         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2999                 flags |= root->fs_info->avail_system_alloc_bits &
3000                          root->fs_info->system_alloc_profile;
3001         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3002                 flags |= root->fs_info->avail_metadata_alloc_bits &
3003                          root->fs_info->metadata_alloc_profile;
3004         return btrfs_reduce_alloc_profile(root, flags);
3005 }
3006
3007 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3008 {
3009         u64 flags;
3010
3011         if (data)
3012                 flags = BTRFS_BLOCK_GROUP_DATA;
3013         else if (root == root->fs_info->chunk_root)
3014                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3015         else
3016                 flags = BTRFS_BLOCK_GROUP_METADATA;
3017
3018         return get_alloc_profile(root, flags);
3019 }
3020
3021 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3022 {
3023         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3024                                                        BTRFS_BLOCK_GROUP_DATA);
3025 }
3026
3027 /*
3028  * This will check the space that the inode allocates from to make sure we have
3029  * enough space for bytes.
3030  */
3031 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3032 {
3033         struct btrfs_space_info *data_sinfo;
3034         struct btrfs_root *root = BTRFS_I(inode)->root;
3035         u64 used;
3036         int ret = 0, committed = 0, alloc_chunk = 1;
3037
3038         /* make sure bytes are sectorsize aligned */
3039         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
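             /* e.g. with a 4K sectorsize, a 4097 byte request rounds up to 8192 */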
3040
3041         if (root == root->fs_info->tree_root ||
3042             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3043                 alloc_chunk = 0;
3044                 committed = 1;
3045         }
3046
3047         data_sinfo = BTRFS_I(inode)->space_info;
3048         if (!data_sinfo)
3049                 goto alloc;
3050
3051 again:
3052         /* make sure we have enough space to handle the data first */
3053         spin_lock(&data_sinfo->lock);
3054         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3055                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3056                 data_sinfo->bytes_may_use;
3057
3058         if (used + bytes > data_sinfo->total_bytes) {
3059                 struct btrfs_trans_handle *trans;
3060
3061                 /*
3062                  * if we don't have enough free bytes in this space then we need
3063                  * to alloc a new chunk.
3064                  */
3065                 if (!data_sinfo->full && alloc_chunk) {
3066                         u64 alloc_target;
3067
3068                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3069                         spin_unlock(&data_sinfo->lock);
3070 alloc:
3071                         alloc_target = btrfs_get_alloc_profile(root, 1);
3072                         trans = btrfs_join_transaction(root);
3073                         if (IS_ERR(trans))
3074                                 return PTR_ERR(trans);
3075
3076                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3077                                              bytes + 2 * 1024 * 1024,
3078                                              alloc_target,
3079                                              CHUNK_ALLOC_NO_FORCE);
3080                         btrfs_end_transaction(trans, root);
3081                         if (ret < 0) {
3082                                 if (ret != -ENOSPC)
3083                                         return ret;
3084                                 else
3085                                         goto commit_trans;
3086                         }
3087
3088                         if (!data_sinfo) {
3089                                 btrfs_set_inode_space_info(root, inode);
3090                                 data_sinfo = BTRFS_I(inode)->space_info;
3091                         }
3092                         goto again;
3093                 }
3094
3095                 /*
3096                  * If we have less pinned bytes than we want to allocate then
3097                  * don't bother committing the transaction, it won't help us.
3098                  */
3099                 if (data_sinfo->bytes_pinned < bytes)
3100                         committed = 1;
3101                 spin_unlock(&data_sinfo->lock);
3102
3103                 /* commit the current transaction and try again */
3104 commit_trans:
3105                 if (!committed &&
3106                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3107                         committed = 1;
3108                         trans = btrfs_join_transaction(root);
3109                         if (IS_ERR(trans))
3110                                 return PTR_ERR(trans);
3111                         ret = btrfs_commit_transaction(trans, root);
3112                         if (ret)
3113                                 return ret;
3114                         goto again;
3115                 }
3116
3117                 return -ENOSPC;
3118         }
3119         data_sinfo->bytes_may_use += bytes;
3120         BTRFS_I(inode)->reserved_bytes += bytes;
3121         spin_unlock(&data_sinfo->lock);
3122
3123         return 0;
3124 }
3125
3126 /*
3127  * called when we are clearing a delalloc extent from the
3128  * inode's io_tree or there was an error for whatever reason
3129  * after calling btrfs_check_data_free_space
3130  */
3131 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3132 {
3133         struct btrfs_root *root = BTRFS_I(inode)->root;
3134         struct btrfs_space_info *data_sinfo;
3135
3136         /* make sure bytes are sectorsize aligned */
3137         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3138
3139         data_sinfo = BTRFS_I(inode)->space_info;
3140         spin_lock(&data_sinfo->lock);
3141         data_sinfo->bytes_may_use -= bytes;
3142         BTRFS_I(inode)->reserved_bytes -= bytes;
3143         spin_unlock(&data_sinfo->lock);
3144 }
3145
3146 static void force_metadata_allocation(struct btrfs_fs_info *info)
3147 {
3148         struct list_head *head = &info->space_info;
3149         struct btrfs_space_info *found;
3150
3151         rcu_read_lock();
3152         list_for_each_entry_rcu(found, head, list) {
3153                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3154                         found->force_alloc = CHUNK_ALLOC_FORCE;
3155         }
3156         rcu_read_unlock();
3157 }
3158
3159 static int should_alloc_chunk(struct btrfs_root *root,
3160                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3161                               int force)
3162 {
3163         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3164         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3165         u64 thresh;
3166
3167         if (force == CHUNK_ALLOC_FORCE)
3168                 return 1;
3169
3170         /*
3171          * in limited mode, we want to have some free space up to
3172          * about 1% of the FS size.
3173          */
3174         if (force == CHUNK_ALLOC_LIMITED) {
3175                 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3176                 thresh = max_t(u64, 64 * 1024 * 1024,
3177                                div_factor_fine(thresh, 1));
3178
3179                 if (num_bytes - num_allocated < thresh)
3180                         return 1;
3181         }
3182
3183         /*
3184          * we have two similar checks here, one based on a percentage
3185          * and one based on a hard number of 256MB.  The idea is that
3186          * if we have a good amount of free room, don't allocate a
3187          * chunk.  A good amount means the chunks we have allocated
3188          * are less than 80% utilized, or there is more than 256MB
3189          * free.
3190          */
3191         if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3192                 return 0;
3193
3194         if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3195                 return 0;
3196
3197         thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3198
3199         /* 256MB or 5% of the FS */
3200         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3201
3202         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3203                 return 0;
3204         return 1;
3205 }
3206
3207 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3208                           struct btrfs_root *extent_root, u64 alloc_bytes,
3209                           u64 flags, int force)
3210 {
3211         struct btrfs_space_info *space_info;
3212         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3213         int wait_for_alloc = 0;
3214         int ret = 0;
3215
3216         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3217
3218         space_info = __find_space_info(extent_root->fs_info, flags);
3219         if (!space_info) {
3220                 ret = update_space_info(extent_root->fs_info, flags,
3221                                         0, 0, &space_info);
3222                 BUG_ON(ret);
3223         }
3224         BUG_ON(!space_info);
3225
3226 again:
3227         spin_lock(&space_info->lock);
3228         if (space_info->force_alloc)
3229                 force = space_info->force_alloc;
3230         if (space_info->full) {
3231                 spin_unlock(&space_info->lock);
3232                 return 0;
3233         }
3234
3235         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3236                 spin_unlock(&space_info->lock);
3237                 return 0;
3238         } else if (space_info->chunk_alloc) {
3239                 wait_for_alloc = 1;
3240         } else {
3241                 space_info->chunk_alloc = 1;
3242         }
3243
3244         spin_unlock(&space_info->lock);
3245
3246         mutex_lock(&fs_info->chunk_mutex);
3247
3248         /*
3249          * The chunk_mutex is held throughout the entirety of a chunk
3250          * allocation, so once we've acquired the chunk_mutex we know that the
3251          * other guy is done and we need to recheck and see if we should
3252          * allocate.
3253          */
3254         if (wait_for_alloc) {
3255                 mutex_unlock(&fs_info->chunk_mutex);
3256                 wait_for_alloc = 0;
3257                 goto again;
3258         }
3259
3260         /*
3261          * If we have mixed data/metadata chunks we want to make sure we keep
3262          * allocating mixed chunks instead of individual chunks.
3263          */
3264         if (btrfs_mixed_space_info(space_info))
3265                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3266
3267         /*
3268          * if we're doing a data chunk, go ahead and make sure that
3269          * we keep a reasonable number of metadata chunks allocated in the
3270          * FS as well.
3271          */
3272         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3273                 fs_info->data_chunk_allocations++;
3274                 if (!(fs_info->data_chunk_allocations %
3275                       fs_info->metadata_ratio))
3276                         force_metadata_allocation(fs_info);
3277         }
3278
3279         ret = btrfs_alloc_chunk(trans, extent_root, flags);
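             /*
              * A failure here (typically -ENOSPC) means no more chunks of this
              * type can be allocated, so remember that this space is full.  On
              * success return 1 so callers can tell a chunk was allocated.
              */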
3280         spin_lock(&space_info->lock);
3281         if (ret)
3282                 space_info->full = 1;
3283         else
3284                 ret = 1;
3285
3286         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3287         space_info->chunk_alloc = 0;
3288         spin_unlock(&space_info->lock);
3289         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3290         return ret;
3291 }
3292
3293 /*
3294  * shrink metadata reservation for delalloc
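      * by kicking the flusher threads and polling until enough of the
      * reserved bytes have been released.  space_info->reservation_progress
      * lets us notice when somebody else frees space so we can bail out
      * early instead of spinning.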
3295  */
3296 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3297                            struct btrfs_root *root, u64 to_reclaim, int sync)
3298 {
3299         struct btrfs_block_rsv *block_rsv;
3300         struct btrfs_space_info *space_info;
3301         u64 reserved;
3302         u64 max_reclaim;
3303         u64 reclaimed = 0;
3304         long time_left;
3305         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3306         int loops = 0;
3307         unsigned long progress;
3308
3309         block_rsv = &root->fs_info->delalloc_block_rsv;
3310         space_info = block_rsv->space_info;
3311
3312         smp_mb();
3313         reserved = space_info->bytes_reserved;
3314         progress = space_info->reservation_progress;
3315
3316         if (reserved == 0)
3317                 return 0;
3318
3319         max_reclaim = min(reserved, to_reclaim);
3320
3321         while (loops < 1024) {
3322                 /* have the flusher threads jump in and do some IO */
3323                 smp_mb();
3324                 nr_pages = min_t(unsigned long, nr_pages,
3325                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3326                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3327
3328                 spin_lock(&space_info->lock);
3329                 if (reserved > space_info->bytes_reserved)
3330                         reclaimed += reserved - space_info->bytes_reserved;
3331                 reserved = space_info->bytes_reserved;
3332                 spin_unlock(&space_info->lock);
3333
3334                 loops++;
3335
3336                 if (reserved == 0 || reclaimed >= max_reclaim)
3337                         break;
3338
3339                 if (trans && trans->transaction->blocked)
3340                         return -EAGAIN;
3341
3342                 time_left = schedule_timeout_interruptible(1);
3343
3344                 /* We were interrupted, exit */
3345                 if (time_left)
3346                         break;
3347
3348                 /* we've kicked the IO a few times, if anything has been freed,
3349                  * exit.  There is no sense in looping here for a long time
3350                  * when we really need to commit the transaction, or there are
3351                  * just too many writers without enough free space
3352                  */
3353
3354                 if (loops > 3) {
3355                         smp_mb();
3356                         if (progress != space_info->reservation_progress)
3357                                 break;
3358                 }
3359
3360         }
3361         return reclaimed >= to_reclaim;
3362 }
3363
3364 /*
3365  * We track internally how many times we have retried the reservation.  The
3366  * idea is that on the first attempt (retries == 0) we will add to our
3367  * reserved count even if we can't make the allocation, in order to hold our
3368  * place while we go and try to free up space.  On later attempts we don't
3369  * add space again, we just check to see if the amount of unused space is >=
3370  * the total space, meaning that our reservation is valid.
3371  *
3372  * If 'flush' is zero we don't try to reclaim space at all, so a failed
3373  * reservation short circuits this retry logic.
3374  */
3375 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3376                                   struct btrfs_root *root,
3377                                   struct btrfs_block_rsv *block_rsv,
3378                                   u64 orig_bytes, int flush)
3379 {
3380         struct btrfs_space_info *space_info = block_rsv->space_info;
3381         u64 unused;
3382         u64 num_bytes = orig_bytes;
3383         int retries = 0;
3384         int ret = 0;
3385         bool reserved = false;
3386         bool committed = false;
3387
3388 again:
3389         ret = -ENOSPC;
3390         if (reserved)
3391                 num_bytes = 0;
3392
3393         spin_lock(&space_info->lock);
3394         unused = space_info->bytes_used + space_info->bytes_reserved +
3395                  space_info->bytes_pinned + space_info->bytes_readonly +
3396                  space_info->bytes_may_use;
3397
3398         /*
3399          * The idea here is that if we've not already over-reserved the space
3400          * then we can go ahead and save our reservation first and then start
3401          * flushing if we need to.  Otherwise, if we've already overcommitted,
3402          * let's start flushing stuff first and then come back and try to make
3403          * our reservation.
3404          */
3405         if (unused <= space_info->total_bytes) {
3406                 unused = space_info->total_bytes - unused;
3407                 if (unused >= num_bytes) {
3408                         if (!reserved)
3409                                 space_info->bytes_reserved += orig_bytes;
3410                         ret = 0;
3411                 } else {
3412                         /*
3413                          * Ok set num_bytes to orig_bytes since we aren't
3414                          * overcommitted, this way we only try and reclaim what
3415                          * we need.
3416                          */
3417                         num_bytes = orig_bytes;
3418                 }
3419         } else {
3420                 /*
3421                  * Ok, we're overcommitted, so set num_bytes to the overcommitted
3422                  * amount plus the amount of bytes that we need for this
3423                  * reservation.
3424                  */
3425                 num_bytes = unused - space_info->total_bytes +
3426                         (orig_bytes * (retries + 1));
3427         }
3428
3429         /*
3430          * Couldn't make our reservation; save our place so that, while we're
3431          * trying to reclaim space, we can actually use it instead of somebody
3432          * else stealing it from us.
3433          */
3434         if (ret && !reserved) {
3435                 space_info->bytes_reserved += orig_bytes;
3436                 reserved = true;
3437         }
3438
3439         spin_unlock(&space_info->lock);
3440
3441         if (!ret)
3442                 return 0;
3443
3444         if (!flush)
3445                 goto out;
3446
3447         /*
3448          * We do synchronous shrinking since we don't actually unreserve
3449          * metadata until after the IO is completed.
3450          */
3451         ret = shrink_delalloc(trans, root, num_bytes, 1);
3452         if (ret > 0)
3453                 return 0;
3454         else if (ret < 0)
3455                 goto out;
3456
3457         /*
3458          * So if we were overcommitted it's possible that somebody else flushed
3459          * out enough space and we simply didn't have enough space to reclaim,
3460          * so go back around and try again.
3461          */
3462         if (retries < 2) {
3463                 retries++;
3464                 goto again;
3465         }
3466
3467         spin_lock(&space_info->lock);
3468         /*
3469          * Not enough space to be reclaimed, don't bother committing the
3470          * transaction.
3471          */
3472         if (space_info->bytes_pinned < orig_bytes)
3473                 ret = -ENOSPC;
3474         spin_unlock(&space_info->lock);
3475         if (ret)
3476                 goto out;
3477
3478         ret = -EAGAIN;
3479         if (trans || committed)
3480                 goto out;
3481
3482         ret = -ENOSPC;
3483         trans = btrfs_join_transaction(root);
3484         if (IS_ERR(trans))
3485                 goto out;
3486         ret = btrfs_commit_transaction(trans, root);
3487         if (!ret) {
3488                 trans = NULL;
3489                 committed = true;
3490                 goto again;
3491         }
3492
3493 out:
3494         if (reserved) {
3495                 spin_lock(&space_info->lock);
3496                 space_info->bytes_reserved -= orig_bytes;
3497                 spin_unlock(&space_info->lock);
3498         }
3499
3500         return ret;
3501 }
3502
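     /*
      * Pick the block reservation to use for this root: COW-able roots use
      * the transaction's reservation, other roots use their own, and we fall
      * back to empty_block_rsv so callers always get a valid pointer.
      */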
3503 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3504                                              struct btrfs_root *root)
3505 {
3506         struct btrfs_block_rsv *block_rsv;
3507         if (root->ref_cows)
3508                 block_rsv = trans->block_rsv;
3509         else
3510                 block_rsv = root->block_rsv;
3511
3512         if (!block_rsv)
3513                 block_rsv = &root->fs_info->empty_block_rsv;
3514
3515         return block_rsv;
3516 }
3517
3518 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3519                                u64 num_bytes)
3520 {
3521         int ret = -ENOSPC;
3522         spin_lock(&block_rsv->lock);
3523         if (block_rsv->reserved >= num_bytes) {
3524                 block_rsv->reserved -= num_bytes;
3525                 if (block_rsv->reserved < block_rsv->size)
3526                         block_rsv->full = 0;
3527                 ret = 0;
3528         }
3529         spin_unlock(&block_rsv->lock);
3530         return ret;
3531 }
3532
3533 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3534                                 u64 num_bytes, int update_size)
3535 {
3536         spin_lock(&block_rsv->lock);
3537         block_rsv->reserved += num_bytes;
3538         if (update_size)
3539                 block_rsv->size += num_bytes;
3540         else if (block_rsv->reserved >= block_rsv->size)
3541                 block_rsv->full = 1;
3542         spin_unlock(&block_rsv->lock);
3543 }
3544
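     /*
      * Reduce the size of the block_rsv by num_bytes ((u64)-1 releases it
      * entirely) and hand any now-excess reserved bytes first to dest, if
      * it has room, and then back to the owning space_info, bumping
      * reservation_progress so that waiters notice.
      */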
3545 static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3546                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3547 {
3548         struct btrfs_space_info *space_info = block_rsv->space_info;
3549
3550         spin_lock(&block_rsv->lock);
3551         if (num_bytes == (u64)-1)
3552                 num_bytes = block_rsv->size;
3553         block_rsv->size -= num_bytes;
3554         if (block_rsv->reserved >= block_rsv->size) {
3555                 num_bytes = block_rsv->reserved - block_rsv->size;
3556                 block_rsv->reserved = block_rsv->size;
3557                 block_rsv->full = 1;
3558         } else {
3559                 num_bytes = 0;
3560         }
3561         spin_unlock(&block_rsv->lock);
3562
3563         if (num_bytes > 0) {
3564                 if (dest) {
3565                         spin_lock(&dest->lock);
3566                         if (!dest->full) {
3567                                 u64 bytes_to_add;
3568
3569                                 bytes_to_add = dest->size - dest->reserved;
3570                                 bytes_to_add = min(num_bytes, bytes_to_add);
3571                                 dest->reserved += bytes_to_add;
3572                                 if (dest->reserved >= dest->size)
3573                                         dest->full = 1;
3574                                 num_bytes -= bytes_to_add;
3575                         }
3576                         spin_unlock(&dest->lock);
3577                 }
3578                 if (num_bytes) {
3579                         spin_lock(&space_info->lock);
3580                         space_info->bytes_reserved -= num_bytes;
3581                         space_info->reservation_progress++;
3582                         spin_unlock(&space_info->lock);
3583                 }
3584         }
3585 }
3586
3587 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3588                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3589 {
3590         int ret;
3591
3592         ret = block_rsv_use_bytes(src, num_bytes);
3593         if (ret)
3594                 return ret;
3595
3596         block_rsv_add_bytes(dst, num_bytes, 1);
3597         return 0;
3598 }
3599
3600 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3601 {
3602         memset(rsv, 0, sizeof(*rsv));
3603         spin_lock_init(&rsv->lock);
3604         atomic_set(&rsv->usage, 1);
3605         rsv->priority = 6;
3606         INIT_LIST_HEAD(&rsv->list);
3607 }
3608
3609 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3610 {
3611         struct btrfs_block_rsv *block_rsv;
3612         struct btrfs_fs_info *fs_info = root->fs_info;
3613
3614         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3615         if (!block_rsv)
3616                 return NULL;
3617
3618         btrfs_init_block_rsv(block_rsv);
3619         block_rsv->space_info = __find_space_info(fs_info,
3620                                                   BTRFS_BLOCK_GROUP_METADATA);
3621         return block_rsv;
3622 }
3623
3624 void btrfs_free_block_rsv(struct btrfs_root *root,
3625                           struct btrfs_block_rsv *rsv)
3626 {
3627         if (rsv && atomic_dec_and_test(&rsv->usage)) {
3628                 btrfs_block_rsv_release(root, rsv, (u64)-1);
3629                 if (!rsv->durable)
3630                         kfree(rsv);
3631         }
3632 }
3633
3634 /*
3635  * make the block_rsv struct able to capture freed space.
3636  * the captured space will be re-added to the block_rsv struct
3637  * after the transaction commits
3638  */
3639 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3640                                  struct btrfs_block_rsv *block_rsv)
3641 {
3642         block_rsv->durable = 1;
3643         mutex_lock(&fs_info->durable_block_rsv_mutex);
3644         list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3645         mutex_unlock(&fs_info->durable_block_rsv_mutex);
3646 }
3647
3648 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3649                         struct btrfs_root *root,
3650                         struct btrfs_block_rsv *block_rsv,
3651                         u64 num_bytes)
3652 {
3653         int ret;
3654
3655         if (num_bytes == 0)
3656                 return 0;
3657
3658         ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
3659         if (!ret) {
3660                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3661                 return 0;
3662         }
3663
3664         return ret;
3665 }
3666
3667 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3668                           struct btrfs_root *root,
3669                           struct btrfs_block_rsv *block_rsv,
3670                           u64 min_reserved, int min_factor)
3671 {
3672         u64 num_bytes = 0;
3673         int commit_trans = 0;
3674         int ret = -ENOSPC;
3675
3676         if (!block_rsv)
3677                 return 0;
3678
3679         spin_lock(&block_rsv->lock);
3680         if (min_factor > 0)
3681                 num_bytes = div_factor(block_rsv->size, min_factor);
3682         if (min_reserved > num_bytes)
3683                 num_bytes = min_reserved;
3684
3685         if (block_rsv->reserved >= num_bytes) {
3686                 ret = 0;
3687         } else {
3688                 num_bytes -= block_rsv->reserved;
3689                 if (block_rsv->durable &&
3690                     block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3691                         commit_trans = 1;
3692         }
3693         spin_unlock(&block_rsv->lock);
3694         if (!ret)
3695                 return 0;
3696
3697         if (block_rsv->refill_used) {
3698                 ret = reserve_metadata_bytes(trans, root, block_rsv,
3699                                              num_bytes, 0);
3700                 if (!ret) {
3701                         block_rsv_add_bytes(block_rsv, num_bytes, 0);
3702                         return 0;
3703                 }
3704         }
3705
3706         if (commit_trans) {
3707                 if (trans)
3708                         return -EAGAIN;
3709
3710                 trans = btrfs_join_transaction(root);
3711                 BUG_ON(IS_ERR(trans));
3712                 ret = btrfs_commit_transaction(trans, root);
3713                 return ret;
3714         }
3715
3716         return -ENOSPC;
3717 }
3718
3719 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3720                             struct btrfs_block_rsv *dst_rsv,
3721                             u64 num_bytes)
3722 {
3723         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3724 }
3725
3726 void btrfs_block_rsv_release(struct btrfs_root *root,
3727                              struct btrfs_block_rsv *block_rsv,
3728                              u64 num_bytes)
3729 {
3730         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3731         if (global_rsv->full || global_rsv == block_rsv ||
3732             block_rsv->space_info != global_rsv->space_info)
3733                 global_rsv = NULL;
3734         block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3735 }
3736
3737 /*
3738  * helper to calculate the size of the global block reservation.
3739  * the desired value is the sum of the space used by the extent tree,
3740  * the checksum tree and the root tree
3741  */
3742 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3743 {
3744         struct btrfs_space_info *sinfo;
3745         u64 num_bytes;
3746         u64 meta_used;
3747         u64 data_used;
3748         int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3749
3750         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3751         spin_lock(&sinfo->lock);
3752         data_used = sinfo->bytes_used;
3753         spin_unlock(&sinfo->lock);
3754
3755         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3756         spin_lock(&sinfo->lock);
3757         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3758                 data_used = 0;
3759         meta_used = sinfo->bytes_used;
3760         spin_unlock(&sinfo->lock);
3761
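             /*
              * Rough estimate: room for two copies of the csum items covering
              * all of the data, plus 2% of the space used by data and metadata
              * for the other trees, capped at a third of the metadata in use.
              */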
3762         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3763                     csum_size * 2;
3764         num_bytes += div64_u64(data_used + meta_used, 50);
3765
3766         if (num_bytes * 3 > meta_used)
3767                 num_bytes = div64_u64(meta_used, 3);
3768
3769         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3770 }
3771
3772 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3773 {
3774         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3775         struct btrfs_space_info *sinfo = block_rsv->space_info;
3776         u64 num_bytes;
3777
3778         num_bytes = calc_global_metadata_size(fs_info);
3779
3780         spin_lock(&block_rsv->lock);
3781         spin_lock(&sinfo->lock);
3782
3783         block_rsv->size = num_bytes;
3784
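             /*
              * Top the reserve up from whatever space in the metadata
              * space_info is not yet accounted for, then trim reserved back
              * down to size if we ended up with more than we need.
              */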
3785         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3786                     sinfo->bytes_reserved + sinfo->bytes_readonly +
3787                     sinfo->bytes_may_use;
3788
3789         if (sinfo->total_bytes > num_bytes) {
3790                 num_bytes = sinfo->total_bytes - num_bytes;
3791                 block_rsv->reserved += num_bytes;
3792                 sinfo->bytes_reserved += num_bytes;
3793         }
3794
3795         if (block_rsv->reserved >= block_rsv->size) {
3796                 num_bytes = block_rsv->reserved - block_rsv->size;
3797                 sinfo->bytes_reserved -= num_bytes;
3798                 sinfo->reservation_progress++;
3799                 block_rsv->reserved = block_rsv->size;
3800                 block_rsv->full = 1;
3801         }
3802
3803         spin_unlock(&sinfo->lock);
3804         spin_unlock(&block_rsv->lock);
3805 }
3806
3807 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3808 {
3809         struct btrfs_space_info *space_info;
3810
3811         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3812         fs_info->chunk_block_rsv.space_info = space_info;
3813         fs_info->chunk_block_rsv.priority = 10;
3814
3815         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3816         fs_info->global_block_rsv.space_info = space_info;
3817         fs_info->global_block_rsv.priority = 10;
3818         fs_info->global_block_rsv.refill_used = 1;
3819         fs_info->delalloc_block_rsv.space_info = space_info;
3820         fs_info->trans_block_rsv.space_info = space_info;
3821         fs_info->empty_block_rsv.space_info = space_info;
3822         fs_info->empty_block_rsv.priority = 10;
3823
3824         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3825         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3826         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3827         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3828         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3829
3830         btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3831
3832         btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3833
3834         update_global_block_rsv(fs_info);
3835 }
3836
3837 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3838 {
3839         block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3840         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3841         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3842         WARN_ON(fs_info->trans_block_rsv.size > 0);
3843         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3844         WARN_ON(fs_info->chunk_block_rsv.size > 0);
3845         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3846 }
3847
3848 int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3849                                     struct btrfs_root *root,
3850                                     struct btrfs_block_rsv *rsv)
3851 {
3852         struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3853         u64 num_bytes;
3854         int ret;
3855
3856         /*
3857          * Truncate should be freeing data, but give us 2 items just in case it
3858          * needs to use some space.  We may want to be smarter about this in the
3859          * future.
3860          */
3861         num_bytes = btrfs_calc_trans_metadata_size(root, 2);
3862
3863         /* We already have enough bytes, just return */
3864         if (rsv->reserved >= num_bytes)
3865                 return 0;
3866
3867         num_bytes -= rsv->reserved;
3868
3869         /*
3870          * You should have reserved enough space beforehand to do this, so this
3871          * should not fail.
3872          */
3873         ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
3874         BUG_ON(ret);
3875
3876         return 0;
3877 }
3878
3879 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3880                                  struct btrfs_root *root,
3881                                  int num_items)
3882 {
3883         u64 num_bytes;
3884         int ret;
3885
3886         if (num_items == 0 || root->fs_info->chunk_root == root)
3887                 return 0;
3888
3889         num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
3890         ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3891                                   num_bytes);
3892         if (!ret) {
3893                 trans->bytes_reserved += num_bytes;
3894                 trans->block_rsv = &root->fs_info->trans_block_rsv;
3895         }
3896         return ret;
3897 }
3898
3899 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3900                                   struct btrfs_root *root)
3901 {
3902         if (!trans->bytes_reserved)
3903                 return;
3904
3905         BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3906         btrfs_block_rsv_release(root, trans->block_rsv,
3907                                 trans->bytes_reserved);
3908         trans->bytes_reserved = 0;
3909 }
3910
3911 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3912                                   struct inode *inode)
3913 {
3914         struct btrfs_root *root = BTRFS_I(inode)->root;
3915         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3916         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3917
3918         /*
3919          * We need to hold space in order to delete our orphan item once we've
3920          * added it, so take the reservation here and release it later
3921          * when we are truly done with the orphan item.
3922          */
3923         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3924         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3925 }
3926
3927 void btrfs_orphan_release_metadata(struct inode *inode)
3928 {
3929         struct btrfs_root *root = BTRFS_I(inode)->root;
3930         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3931         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3932 }
3933
3934 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3935                                 struct btrfs_pending_snapshot *pending)
3936 {
3937         struct btrfs_root *root = pending->root;
3938         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3939         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3940         /*
3941          * two for root back/forward refs, two for directory entries
3942          * and one for root of the snapshot.
3943          */
3944         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3945         dst_rsv->space_info = src_rsv->space_info;
3946         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3947 }
3948
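     /*
      * Very rough heuristic: assume the checksum metadata for a reservation
      * costs about one byte for every eight bytes of data.
      */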
3949 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3950 {
3951         return num_bytes >> 3;
3952 }
3953
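     /*
      * Reserve metadata space for num_bytes of delalloc: enough for any new
      * outstanding extents beyond what is already reserved, plus an estimate
      * for the checksums the data will need.
      */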
3954 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3955 {
3956         struct btrfs_root *root = BTRFS_I(inode)->root;
3957         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3958         u64 to_reserve;
3959         int nr_extents;
3960         int reserved_extents;
3961         int ret;
3962
3963         if (btrfs_transaction_in_commit(root->fs_info))
3964                 schedule_timeout(1);
3965
3966         num_bytes = ALIGN(num_bytes, root->sectorsize);
3967
3968         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3969         reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
3970
3971         if (nr_extents > reserved_extents) {
3972                 nr_extents -= reserved_extents;
3973                 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
3974         } else {
3975                 nr_extents = 0;
3976                 to_reserve = 0;
3977         }
3978
3979         to_reserve += calc_csum_metadata_size(inode, num_bytes);
3980         ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
3981         if (ret)
3982                 return ret;
3983
3984         atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
3985         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3986
3987         block_rsv_add_bytes(block_rsv, to_reserve, 1);
3988
3989         if (block_rsv->size > 512 * 1024 * 1024)
3990                 shrink_delalloc(NULL, root, to_reserve, 0);
3991
3992         return 0;
3993 }
3994
3995 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3996 {
3997         struct btrfs_root *root = BTRFS_I(inode)->root;
3998         u64 to_free;
3999         int nr_extents;
4000         int reserved_extents;
4001
4002         num_bytes = ALIGN(num_bytes, root->sectorsize);
4003         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4004         WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
4005
4006         reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
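             /*
              * Bring reserved_extents down to the current number of
              * outstanding extents with a cmpxchg loop so concurrent
              * reservations aren't lost; nr_extents ends up as the count
              * we are releasing here.
              */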
4007         do {
4008                 int old, new;
4009
4010                 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4011                 if (nr_extents >= reserved_extents) {
4012                         nr_extents = 0;
4013                         break;
4014                 }
4015                 old = reserved_extents;
4016                 nr_extents = reserved_extents - nr_extents;
4017                 new = reserved_extents - nr_extents;
4018                 old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
4019                                      reserved_extents, new);
4020                 if (likely(old == reserved_extents))
4021                         break;
4022                 reserved_extents = old;
4023         } while (1);
4024
4025         to_free = calc_csum_metadata_size(inode, num_bytes);
4026         if (nr_extents > 0)
4027                 to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
4028
4029         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4030                                 to_free);
4031 }
4032
4033 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4034 {
4035         int ret;
4036
4037         ret = btrfs_check_data_free_space(inode, num_bytes);
4038         if (ret)
4039                 return ret;
4040
4041         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4042         if (ret) {
4043                 btrfs_free_reserved_data_space(inode, num_bytes);
4044                 return ret;
4045         }
4046
4047         return 0;
4048 }
4049
4050 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4051 {
4052         btrfs_delalloc_release_metadata(inode, num_bytes);
4053         btrfs_free_reserved_data_space(inode, num_bytes);
4054 }
4055
4056 static int update_block_group(struct btrfs_trans_handle *trans,
4057                               struct btrfs_root *root,
4058                               u64 bytenr, u64 num_bytes, int alloc)
4059 {
4060         struct btrfs_block_group_cache *cache = NULL;
4061         struct btrfs_fs_info *info = root->fs_info;
4062         u64 total = num_bytes;
4063         u64 old_val;
4064         u64 byte_in_group;
4065         int factor;
4066
4067         /* block accounting for super block */
4068         spin_lock(&info->delalloc_lock);
4069         old_val = btrfs_super_bytes_used(&info->super_copy);
4070         if (alloc)
4071                 old_val += num_bytes;
4072         else
4073                 old_val -= num_bytes;
4074         btrfs_set_super_bytes_used(&info->super_copy, old_val);
4075         spin_unlock(&info->delalloc_lock);
4076
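             /*
              * The range may cross a block group boundary, so walk it one
              * block group at a time and adjust the per-group and
              * per-space-info counters as we go.
              */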
4077         while (total) {
4078                 cache = btrfs_lookup_block_group(info, bytenr);
4079                 if (!cache)
4080                         return -ENOENT;
4081                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4082                                     BTRFS_BLOCK_GROUP_RAID1 |
4083                                     BTRFS_BLOCK_GROUP_RAID10))
4084                         factor = 2;
4085                 else
4086                         factor = 1;
4087                 /*
4088                  * If this block group has its free space cache written out, we
4089                  * need to make sure to load it if we are removing space.  This
4090                  * is because we need the unpinning stage to actually add the
4091                  * space back to the block group, otherwise we will leak space.
4092                  */
4093                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4094                         cache_block_group(cache, trans, NULL, 1);
4095
4096                 byte_in_group = bytenr - cache->key.objectid;
4097                 WARN_ON(byte_in_group > cache->key.offset);
4098
4099                 spin_lock(&cache->space_info->lock);
4100                 spin_lock(&cache->lock);
4101
4102                 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4103                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4104                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4105
4106                 cache->dirty = 1;
4107                 old_val = btrfs_block_group_used(&cache->item);
4108                 num_bytes = min(total, cache->key.offset - byte_in_group);
4109                 if (alloc) {
4110                         old_val += num_bytes;
4111                         btrfs_set_block_group_used(&cache->item, old_val);
4112                         cache->reserved -= num_bytes;
4113                         cache->space_info->bytes_reserved -= num_bytes;
4114                         cache->space_info->reservation_progress++;
4115                         cache->space_info->bytes_used += num_bytes;
4116                         cache->space_info->disk_used += num_bytes * factor;
4117                         spin_unlock(&cache->lock);
4118                         spin_unlock(&cache->space_info->lock);
4119                 } else {
4120                         old_val -= num_bytes;
4121                         btrfs_set_block_group_used(&cache->item, old_val);
4122                         cache->pinned += num_bytes;
4123                         cache->space_info->bytes_pinned += num_bytes;
4124                         cache->space_info->bytes_used -= num_bytes;
4125                         cache->space_info->disk_used -= num_bytes * factor;
4126                         spin_unlock(&cache->lock);
4127                         spin_unlock(&cache->space_info->lock);
4128
4129                         set_extent_dirty(info->pinned_extents,
4130                                          bytenr, bytenr + num_bytes - 1,
4131                                          GFP_NOFS | __GFP_NOFAIL);
4132                 }
4133                 btrfs_put_block_group(cache);
4134                 total -= num_bytes;
4135                 bytenr += num_bytes;
4136         }
4137         return 0;
4138 }
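/*
 * Illustrative example (not from the original source): freeing a 1MB
 * extent from a RAID1 block group takes the factor = 2 branch above, so
 * space_info->bytes_used drops by 1MB while disk_used drops by 2MB,
 * since the extent occupied two copies on disk.  In a RAID0 or single
 * group factor = 1 and the two counters move together.
 */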
4139
4140 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4141 {
4142         struct btrfs_block_group_cache *cache;
4143         u64 bytenr;
4144
4145         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4146         if (!cache)
4147                 return 0;
4148
4149         bytenr = cache->key.objectid;
4150         btrfs_put_block_group(cache);
4151
4152         return bytenr;
4153 }
4154
4155 static int pin_down_extent(struct btrfs_root *root,
4156                            struct btrfs_block_group_cache *cache,
4157                            u64 bytenr, u64 num_bytes, int reserved)
4158 {
4159         spin_lock(&cache->space_info->lock);
4160         spin_lock(&cache->lock);
4161         cache->pinned += num_bytes;
4162         cache->space_info->bytes_pinned += num_bytes;
4163         if (reserved) {
4164                 cache->reserved -= num_bytes;
4165                 cache->space_info->bytes_reserved -= num_bytes;
4166                 cache->space_info->reservation_progress++;
4167         }
4168         spin_unlock(&cache->lock);
4169         spin_unlock(&cache->space_info->lock);
4170
4171         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4172                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4173         return 0;
4174 }
4175
4176 /*
4177  * this function must be called within a transaction
4178  */
4179 int btrfs_pin_extent(struct btrfs_root *root,
4180                      u64 bytenr, u64 num_bytes, int reserved)
4181 {
4182         struct btrfs_block_group_cache *cache;
4183
4184         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4185         BUG_ON(!cache);
4186
4187         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4188
4189         btrfs_put_block_group(cache);
4190         return 0;
4191 }
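/*
 * Usage sketch: btrfs_free_extent() below pins tree log blocks with
 *
 *         btrfs_pin_extent(root, bytenr, num_bytes, 1);
 *
 * where reserved == 1 makes pin_down_extent() move the bytes out of the
 * reserved counters as well as adding them to the pinned ones, all
 * under the same pair of locks.
 */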
4192
4193 /*
4194  * update size of reserved extents. this function may return -EAGAIN
4195  * if the block group is read-only and 'reserve' is set or 'sinfo' is not.
4196  */
4197 int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4198                                 u64 num_bytes, int reserve, int sinfo)
4199 {
4200         int ret = 0;
4201         if (sinfo) {
4202                 struct btrfs_space_info *space_info = cache->space_info;
4203                 spin_lock(&space_info->lock);
4204                 spin_lock(&cache->lock);
4205                 if (reserve) {
4206                         if (cache->ro) {
4207                                 ret = -EAGAIN;
4208                         } else {
4209                                 cache->reserved += num_bytes;
4210                                 space_info->bytes_reserved += num_bytes;
4211                         }
4212                 } else {
4213                         if (cache->ro)
4214                                 space_info->bytes_readonly += num_bytes;
4215                         cache->reserved -= num_bytes;
4216                         space_info->bytes_reserved -= num_bytes;
4217                         space_info->reservation_progress++;
4218                 }
4219                 spin_unlock(&cache->lock);
4220                 spin_unlock(&space_info->lock);
4221         } else {
4222                 spin_lock(&cache->lock);
4223                 if (cache->ro) {
4224                         ret = -EAGAIN;
4225                 } else {
4226                         if (reserve)
4227                                 cache->reserved += num_bytes;
4228                         else
4229                                 cache->reserved -= num_bytes;
4230                 }
4231                 spin_unlock(&cache->lock);
4232         }
4233         return ret;
4234 }
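/*
 * Usage sketch: find_free_extent() below reserves its candidate with
 *
 *         ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
 *                                     (data & BTRFS_BLOCK_GROUP_DATA));
 *
 * and treats -EAGAIN as "the group went read-only": the space is handed
 * back via btrfs_add_free_space() and the search moves on.  The
 * reservation is dropped again either directly in update_block_group()
 * once the extent is really allocated, or through
 * btrfs_free_reserved_extent() if it is given back.
 */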
4235
4236 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4237                                 struct btrfs_root *root)
4238 {
4239         struct btrfs_fs_info *fs_info = root->fs_info;
4240         struct btrfs_caching_control *next;
4241         struct btrfs_caching_control *caching_ctl;
4242         struct btrfs_block_group_cache *cache;
4243
4244         down_write(&fs_info->extent_commit_sem);
4245
4246         list_for_each_entry_safe(caching_ctl, next,
4247                                  &fs_info->caching_block_groups, list) {
4248                 cache = caching_ctl->block_group;
4249                 if (block_group_cache_done(cache)) {
4250                         cache->last_byte_to_unpin = (u64)-1;
4251                         list_del_init(&caching_ctl->list);
4252                         put_caching_control(caching_ctl);
4253                 } else {
4254                         cache->last_byte_to_unpin = caching_ctl->progress;
4255                 }
4256         }
4257
4258         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4259                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4260         else
4261                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4262
4263         up_write(&fs_info->extent_commit_sem);
4264
4265         update_global_block_rsv(fs_info);
4266         return 0;
4267 }
4268
4269 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4270 {
4271         struct btrfs_fs_info *fs_info = root->fs_info;
4272         struct btrfs_block_group_cache *cache = NULL;
4273         u64 len;
4274
4275         while (start <= end) {
4276                 if (!cache ||
4277                     start >= cache->key.objectid + cache->key.offset) {
4278                         if (cache)
4279                                 btrfs_put_block_group(cache);
4280                         cache = btrfs_lookup_block_group(fs_info, start);
4281                         BUG_ON(!cache);
4282                 }
4283
4284                 len = cache->key.objectid + cache->key.offset - start;
4285                 len = min(len, end + 1 - start);
4286
4287                 if (start < cache->last_byte_to_unpin) {
4288                         len = min(len, cache->last_byte_to_unpin - start);
4289                         btrfs_add_free_space(cache, start, len);
4290                 }
4291
4292                 start += len;
4293
4294                 spin_lock(&cache->space_info->lock);
4295                 spin_lock(&cache->lock);
4296                 cache->pinned -= len;
4297                 cache->space_info->bytes_pinned -= len;
4298                 if (cache->ro) {
4299                         cache->space_info->bytes_readonly += len;
4300                 } else if (cache->reserved_pinned > 0) {
4301                         len = min(len, cache->reserved_pinned);
4302                         cache->reserved_pinned -= len;
4303                         cache->space_info->bytes_reserved += len;
4304                 }
4305                 spin_unlock(&cache->lock);
4306                 spin_unlock(&cache->space_info->lock);
4307         }
4308
4309         if (cache)
4310                 btrfs_put_block_group(cache);
4311         return 0;
4312 }
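/*
 * Note on the reserved_pinned handling above (sketch): bytes that
 * btrfs_free_tree_block() below accounts in cache->reserved_pinned are
 * turned back into space_info->bytes_reserved here as the range is
 * unpinned, so a durable block rsv regains its reservation once the
 * transaction that freed its blocks commits (see
 * btrfs_finish_extent_commit()).
 */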
4313
4314 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4315                                struct btrfs_root *root)
4316 {
4317         struct btrfs_fs_info *fs_info = root->fs_info;
4318         struct extent_io_tree *unpin;
4319         struct btrfs_block_rsv *block_rsv;
4320         struct btrfs_block_rsv *next_rsv;
4321         u64 start;
4322         u64 end;
4323         int idx;
4324         int ret;
4325
4326         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4327                 unpin = &fs_info->freed_extents[1];
4328         else
4329                 unpin = &fs_info->freed_extents[0];
4330
4331         while (1) {
4332                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4333                                             EXTENT_DIRTY);
4334                 if (ret)
4335                         break;
4336
4337                 if (btrfs_test_opt(root, DISCARD))
4338                         ret = btrfs_discard_extent(root, start,
4339                                                    end + 1 - start, NULL);
4340
4341                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4342                 unpin_extent_range(root, start, end);
4343                 cond_resched();
4344         }
4345
4346         mutex_lock(&fs_info->durable_block_rsv_mutex);
4347         list_for_each_entry_safe(block_rsv, next_rsv,
4348                                  &fs_info->durable_block_rsv_list, list) {
4349
4350                 idx = trans->transid & 0x1;
4351                 if (block_rsv->freed[idx] > 0) {
4352                         block_rsv_add_bytes(block_rsv,
4353                                             block_rsv->freed[idx], 0);
4354                         block_rsv->freed[idx] = 0;
4355                 }
4356                 if (atomic_read(&block_rsv->usage) == 0) {
4357                         btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4358
4359                         if (block_rsv->freed[0] == 0 &&
4360                             block_rsv->freed[1] == 0) {
4361                                 list_del_init(&block_rsv->list);
4362                                 kfree(block_rsv);
4363                         }
4364                 } else {
4365                         btrfs_block_rsv_release(root, block_rsv, 0);
4366                 }
4367         }
4368         mutex_unlock(&fs_info->durable_block_rsv_mutex);
4369
4370         return 0;
4371 }
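/*
 * Note: trans->transid & 0x1 alternates between the two freed[] slots
 * on consecutive transactions, mirroring the freed_extents[] swap done
 * in btrfs_prepare_extent_commit() above, so the bytes added back to a
 * durable rsv here are the ones recorded by btrfs_free_tree_block()
 * while the just-committed transaction was running.
 */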
4372
4373 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4374                                 struct btrfs_root *root,
4375                                 u64 bytenr, u64 num_bytes, u64 parent,
4376                                 u64 root_objectid, u64 owner_objectid,
4377                                 u64 owner_offset, int refs_to_drop,
4378                                 struct btrfs_delayed_extent_op *extent_op)
4379 {
4380         struct btrfs_key key;
4381         struct btrfs_path *path;
4382         struct btrfs_fs_info *info = root->fs_info;
4383         struct btrfs_root *extent_root = info->extent_root;
4384         struct extent_buffer *leaf;
4385         struct btrfs_extent_item *ei;
4386         struct btrfs_extent_inline_ref *iref;
4387         int ret;
4388         int is_data;
4389         int extent_slot = 0;
4390         int found_extent = 0;
4391         int num_to_del = 1;
4392         u32 item_size;
4393         u64 refs;
4394
4395         path = btrfs_alloc_path();
4396         if (!path)
4397                 return -ENOMEM;
4398
4399         path->reada = 1;
4400         path->leave_spinning = 1;
4401
4402         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4403         BUG_ON(!is_data && refs_to_drop != 1);
4404
4405         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4406                                     bytenr, num_bytes, parent,
4407                                     root_objectid, owner_objectid,
4408                                     owner_offset);
4409         if (ret == 0) {
4410                 extent_slot = path->slots[0];
4411                 while (extent_slot >= 0) {
4412                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4413                                               extent_slot);
4414                         if (key.objectid != bytenr)
4415                                 break;
4416                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4417                             key.offset == num_bytes) {
4418                                 found_extent = 1;
4419                                 break;
4420                         }
4421                         if (path->slots[0] - extent_slot > 5)
4422                                 break;
4423                         extent_slot--;
4424                 }
4425 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4426                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4427                 if (found_extent && item_size < sizeof(*ei))
4428                         found_extent = 0;
4429 #endif
4430                 if (!found_extent) {
4431                         BUG_ON(iref);
4432                         ret = remove_extent_backref(trans, extent_root, path,
4433                                                     NULL, refs_to_drop,
4434                                                     is_data);
4435                         BUG_ON(ret);
4436                         btrfs_release_path(path);
4437                         path->leave_spinning = 1;
4438
4439                         key.objectid = bytenr;
4440                         key.type = BTRFS_EXTENT_ITEM_KEY;
4441                         key.offset = num_bytes;
4442
4443                         ret = btrfs_search_slot(trans, extent_root,
4444                                                 &key, path, -1, 1);
4445                         if (ret) {
4446                                 printk(KERN_ERR "umm, got %d back from search"
4447                                        ", was looking for %llu\n", ret,
4448                                        (unsigned long long)bytenr);
4449                                 btrfs_print_leaf(extent_root, path->nodes[0]);
4450                         }
4451                         BUG_ON(ret);
4452                         extent_slot = path->slots[0];
4453                 }
4454         } else {
4455                 btrfs_print_leaf(extent_root, path->nodes[0]);
4456                 WARN_ON(1);
4457                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4458                        "parent %llu root %llu  owner %llu offset %llu\n",
4459                        (unsigned long long)bytenr,
4460                        (unsigned long long)parent,
4461                        (unsigned long long)root_objectid,
4462                        (unsigned long long)owner_objectid,
4463                        (unsigned long long)owner_offset);
4464         }
4465
4466         leaf = path->nodes[0];
4467         item_size = btrfs_item_size_nr(leaf, extent_slot);
4468 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4469         if (item_size < sizeof(*ei)) {
4470                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4471                 ret = convert_extent_item_v0(trans, extent_root, path,
4472                                              owner_objectid, 0);
4473                 BUG_ON(ret < 0);
4474
4475                 btrfs_release_path(path);
4476                 path->leave_spinning = 1;
4477
4478                 key.objectid = bytenr;
4479                 key.type = BTRFS_EXTENT_ITEM_KEY;
4480                 key.offset = num_bytes;
4481
4482                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4483                                         -1, 1);
4484                 if (ret) {
4485                         printk(KERN_ERR "umm, got %d back from search"
4486                                ", was looking for %llu\n", ret,
4487                                (unsigned long long)bytenr);
4488                         btrfs_print_leaf(extent_root, path->nodes[0]);
4489                 }
4490                 BUG_ON(ret);
4491                 extent_slot = path->slots[0];
4492                 leaf = path->nodes[0];
4493                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4494         }
4495 #endif
4496         BUG_ON(item_size < sizeof(*ei));
4497         ei = btrfs_item_ptr(leaf, extent_slot,
4498                             struct btrfs_extent_item);
4499         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4500                 struct btrfs_tree_block_info *bi;
4501                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4502                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4503                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4504         }
4505
4506         refs = btrfs_extent_refs(leaf, ei);
4507         BUG_ON(refs < refs_to_drop);
4508         refs -= refs_to_drop;
4509
4510         if (refs > 0) {
4511                 if (extent_op)
4512                         __run_delayed_extent_op(extent_op, leaf, ei);
4513                 /*
4514                  * In the case of inline back ref, reference count will
4515                  * be updated by remove_extent_backref
4516                  */
4517                 if (iref) {
4518                         BUG_ON(!found_extent);
4519                 } else {
4520                         btrfs_set_extent_refs(leaf, ei, refs);
4521                         btrfs_mark_buffer_dirty(leaf);
4522                 }
4523                 if (found_extent) {
4524                         ret = remove_extent_backref(trans, extent_root, path,
4525                                                     iref, refs_to_drop,
4526                                                     is_data);
4527                         BUG_ON(ret);
4528                 }
4529         } else {
4530                 if (found_extent) {
4531                         BUG_ON(is_data && refs_to_drop !=
4532                                extent_data_ref_count(root, path, iref));
4533                         if (iref) {
4534                                 BUG_ON(path->slots[0] != extent_slot);
4535                         } else {
4536                                 BUG_ON(path->slots[0] != extent_slot + 1);
4537                                 path->slots[0] = extent_slot;
4538                                 num_to_del = 2;
4539                         }
4540                 }
4541
4542                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4543                                       num_to_del);
4544                 BUG_ON(ret);
4545                 btrfs_release_path(path);
4546
4547                 if (is_data) {
4548                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4549                         BUG_ON(ret);
4550                 } else {
4551                         invalidate_mapping_pages(info->btree_inode->i_mapping,
4552                              bytenr >> PAGE_CACHE_SHIFT,
4553                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4554                 }
4555
4556                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4557                 BUG_ON(ret);
4558         }
4559         btrfs_free_path(path);
4560         return ret;
4561 }
4562
4563 /*
4564  * when we free a block, it is possible (and likely) that we free the last
4565  * delayed ref for that extent as well.  This searches the delayed ref tree for
4566  * a given extent, and if there are no other delayed refs to be processed, it
4567  * removes it from the tree.
4568  */
4569 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4570                                       struct btrfs_root *root, u64 bytenr)
4571 {
4572         struct btrfs_delayed_ref_head *head;
4573         struct btrfs_delayed_ref_root *delayed_refs;
4574         struct btrfs_delayed_ref_node *ref;
4575         struct rb_node *node;
4576         int ret = 0;
4577
4578         delayed_refs = &trans->transaction->delayed_refs;
4579         spin_lock(&delayed_refs->lock);
4580         head = btrfs_find_delayed_ref_head(trans, bytenr);
4581         if (!head)
4582                 goto out;
4583
4584         node = rb_prev(&head->node.rb_node);
4585         if (!node)
4586                 goto out;
4587
4588         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4589
4590         /* there are still entries for this ref, we can't drop it */
4591         if (ref->bytenr == bytenr)
4592                 goto out;
4593
4594         if (head->extent_op) {
4595                 if (!head->must_insert_reserved)
4596                         goto out;
4597                 kfree(head->extent_op);
4598                 head->extent_op = NULL;
4599         }
4600
4601         /*
4602          * waiting for the lock here would deadlock.  If someone else has it
4603  * locked, they are already in the process of dropping it anyway
4604          */
4605         if (!mutex_trylock(&head->mutex))
4606                 goto out;
4607
4608         /*
4609          * at this point we have a head with no other entries.  Go
4610          * ahead and process it.
4611          */
4612         head->node.in_tree = 0;
4613         rb_erase(&head->node.rb_node, &delayed_refs->root);
4614
4615         delayed_refs->num_entries--;
4616
4617         /*
4618          * we don't take a ref on the node because we're removing it from the
4619          * tree, so we just steal the ref the tree was holding.
4620          */
4621         delayed_refs->num_heads--;
4622         if (list_empty(&head->cluster))
4623                 delayed_refs->num_heads_ready--;
4624
4625         list_del_init(&head->cluster);
4626         spin_unlock(&delayed_refs->lock);
4627
4628         BUG_ON(head->extent_op);
4629         if (head->must_insert_reserved)
4630                 ret = 1;
4631
4632         mutex_unlock(&head->mutex);
4633         btrfs_put_delayed_ref(&head->node);
4634         return ret;
4635 out:
4636         spin_unlock(&delayed_refs->lock);
4637         return 0;
4638 }
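/*
 * Caller's view (sketch): btrfs_free_tree_block() below pins the buffer
 * when this returns 0 (the ref head could not be dropped here, or no
 * reserved insertion was pending) and only returns the space straight
 * to the free space cache when this returns 1, i.e. when the extent
 * item was never inserted (must_insert_reserved).
 */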
4639
4640 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4641                            struct btrfs_root *root,
4642                            struct extent_buffer *buf,
4643                            u64 parent, int last_ref)
4644 {
4645         struct btrfs_block_rsv *block_rsv;
4646         struct btrfs_block_group_cache *cache = NULL;
4647         int ret;
4648
4649         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4650                 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4651                                                 parent, root->root_key.objectid,
4652                                                 btrfs_header_level(buf),
4653                                                 BTRFS_DROP_DELAYED_REF, NULL);
4654                 BUG_ON(ret);
4655         }
4656
4657         if (!last_ref)
4658                 return;
4659
4660         block_rsv = get_block_rsv(trans, root);
4661         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4662         if (block_rsv->space_info != cache->space_info)
4663                 goto out;
4664
4665         if (btrfs_header_generation(buf) == trans->transid) {
4666                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4667                         ret = check_ref_cleanup(trans, root, buf->start);
4668                         if (!ret)
4669                                 goto pin;
4670                 }
4671
4672                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4673                         pin_down_extent(root, cache, buf->start, buf->len, 1);
4674                         goto pin;
4675                 }
4676
4677                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4678
4679                 btrfs_add_free_space(cache, buf->start, buf->len);
4680                 ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
4681                 if (ret == -EAGAIN) {
4682                         /* block group became read-only */
4683                         btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
4684                         goto out;
4685                 }
4686
4687                 ret = 1;
4688                 spin_lock(&block_rsv->lock);
4689                 if (block_rsv->reserved < block_rsv->size) {
4690                         block_rsv->reserved += buf->len;
4691                         ret = 0;
4692                 }
4693                 spin_unlock(&block_rsv->lock);
4694
4695                 if (ret) {
4696                         spin_lock(&cache->space_info->lock);
4697                         cache->space_info->bytes_reserved -= buf->len;
4698                         cache->space_info->reservation_progress++;
4699                         spin_unlock(&cache->space_info->lock);
4700                 }
4701                 goto out;
4702         }
4703 pin:
4704         if (block_rsv->durable && !cache->ro) {
4705                 ret = 0;
4706                 spin_lock(&cache->lock);
4707                 if (!cache->ro) {
4708                         cache->reserved_pinned += buf->len;
4709                         ret = 1;
4710                 }
4711                 spin_unlock(&cache->lock);
4712
4713                 if (ret) {
4714                         spin_lock(&block_rsv->lock);
4715                         block_rsv->freed[trans->transid & 0x1] += buf->len;
4716                         spin_unlock(&block_rsv->lock);
4717                 }
4718         }
4719 out:
4720         /*
4721          * Deleting the buffer, clear the corrupt flag since it doesn't matter
4722          * anymore.
4723          */
4724         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
4725         btrfs_put_block_group(cache);
4726 }
4727
4728 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4729                       struct btrfs_root *root,
4730                       u64 bytenr, u64 num_bytes, u64 parent,
4731                       u64 root_objectid, u64 owner, u64 offset)
4732 {
4733         int ret;
4734
4735         /*
4736          * tree log blocks never actually go into the extent allocation
4737          * tree, just update pinning info and exit early.
4738          */
4739         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4740                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4741                 /* unlocks the pinned mutex */
4742                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4743                 ret = 0;
4744         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4745                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4746                                         parent, root_objectid, (int)owner,
4747                                         BTRFS_DROP_DELAYED_REF, NULL);
4748                 BUG_ON(ret);
4749         } else {
4750                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4751                                         parent, root_objectid, owner,
4752                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4753                 BUG_ON(ret);
4754         }
4755         return ret;
4756 }
4757
4758 static u64 stripe_align(struct btrfs_root *root, u64 val)
4759 {
4760         u64 mask = ((u64)root->stripesize - 1);
4761         u64 ret = (val + mask) & ~mask;
4762         return ret;
4763 }
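/*
 * e.g. with root->stripesize == 65536, stripe_align(root, 65537)
 * computes mask == 0xffff and returns (65537 + 0xffff) & ~0xffff ==
 * 131072, rounding up to the next stripe boundary; an already aligned
 * value such as 65536 comes back unchanged.
 */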
4764
4765 /*
4766  * when we wait for progress in the block group caching, it's because
4767  * our allocation attempt failed at least once.  So, we must sleep
4768  * and let some progress happen before we try again.
4769  *
4770  * This function will sleep at least once waiting for new free space to
4771  * show up, and then it will check the block group free space numbers
4772  * for our min num_bytes.  Another option is to have it go ahead
4773  * and look in the rbtree for a free extent of a given size, but this
4774  * is a good start.
4775  */
4776 static noinline int
4777 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4778                                 u64 num_bytes)
4779 {
4780         struct btrfs_caching_control *caching_ctl;
4781         DEFINE_WAIT(wait);
4782
4783         caching_ctl = get_caching_control(cache);
4784         if (!caching_ctl)
4785                 return 0;
4786
4787         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4788                    (cache->free_space_ctl->free_space >= num_bytes));
4789
4790         put_caching_control(caching_ctl);
4791         return 0;
4792 }
4793
4794 static noinline int
4795 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4796 {
4797         struct btrfs_caching_control *caching_ctl;
4798         DEFINE_WAIT(wait);
4799
4800         caching_ctl = get_caching_control(cache);
4801         if (!caching_ctl)
4802                 return 0;
4803
4804         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4805
4806         put_caching_control(caching_ctl);
4807         return 0;
4808 }
4809
4810 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4811 {
4812         int index;
4813         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4814                 index = 0;
4815         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4816                 index = 1;
4817         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4818                 index = 2;
4819         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4820                 index = 3;
4821         else
4822                 index = 4;
4823         return index;
4824 }
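/*
 * The index picks one of the space_info->block_groups[] lists, ordered
 * from most to least redundant: 0 RAID10, 1 RAID1, 2 DUP, 3 RAID0,
 * 4 single.  find_free_extent() below scans list 'index' and only
 * advances with ++index < BTRFS_NR_RAID_TYPES after a full pass finds
 * nothing, so more redundant groups are tried first.
 */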
4825
4826 enum btrfs_loop_type {
4827         LOOP_FIND_IDEAL = 0,
4828         LOOP_CACHING_NOWAIT = 1,
4829         LOOP_CACHING_WAIT = 2,
4830         LOOP_ALLOC_CHUNK = 3,
4831         LOOP_NO_EMPTY_SIZE = 4,
4832 };
4833
4834 /*
4835  * walks the btree of allocated extents and finds a hole of a given size.
4836  * The key ins is changed to record the hole:
4837  * ins->objectid == block start
4838  * ins->flags = BTRFS_EXTENT_ITEM_KEY
4839  * ins->offset == number of blocks
4840  * Any available blocks before search_start are skipped.
4841  */
4842 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4843                                      struct btrfs_root *orig_root,
4844                                      u64 num_bytes, u64 empty_size,
4845                                      u64 search_start, u64 search_end,
4846                                      u64 hint_byte, struct btrfs_key *ins,
4847                                      u64 data)
4848 {
4849         int ret = 0;
4850         struct btrfs_root *root = orig_root->fs_info->extent_root;
4851         struct btrfs_free_cluster *last_ptr = NULL;
4852         struct btrfs_block_group_cache *block_group = NULL;
4853         int empty_cluster = 2 * 1024 * 1024;
4854         int allowed_chunk_alloc = 0;
4855         int done_chunk_alloc = 0;
4856         struct btrfs_space_info *space_info;
4857         int last_ptr_loop = 0;
4858         int loop = 0;
4859         int index = 0;
4860         bool found_uncached_bg = false;
4861         bool failed_cluster_refill = false;
4862         bool failed_alloc = false;
4863         bool use_cluster = true;
4864         u64 ideal_cache_percent = 0;
4865         u64 ideal_cache_offset = 0;
4866
4867         WARN_ON(num_bytes < root->sectorsize);
4868         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4869         ins->objectid = 0;
4870         ins->offset = 0;
4871
4872         space_info = __find_space_info(root->fs_info, data);
4873         if (!space_info) {
4874                 printk(KERN_ERR "No space info for %llu\n", data);
4875                 return -ENOSPC;
4876         }
4877
4878         /*
4879          * If the space info is for both data and metadata it means we have a
4880          * small filesystem and we can't use the clustering stuff.
4881          */
4882         if (btrfs_mixed_space_info(space_info))
4883                 use_cluster = false;
4884
4885         if (orig_root->ref_cows || empty_size)
4886                 allowed_chunk_alloc = 1;
4887
4888         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4889                 last_ptr = &root->fs_info->meta_alloc_cluster;
4890                 if (!btrfs_test_opt(root, SSD))
4891                         empty_cluster = 64 * 1024;
4892         }
4893
4894         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4895             btrfs_test_opt(root, SSD)) {
4896                 last_ptr = &root->fs_info->data_alloc_cluster;
4897         }
4898
4899         if (last_ptr) {
4900                 spin_lock(&last_ptr->lock);
4901                 if (last_ptr->block_group)
4902                         hint_byte = last_ptr->window_start;
4903                 spin_unlock(&last_ptr->lock);
4904         }
4905
4906         search_start = max(search_start, first_logical_byte(root, 0));
4907         search_start = max(search_start, hint_byte);
4908
4909         if (!last_ptr)
4910                 empty_cluster = 0;
4911
4912         if (search_start == hint_byte) {
4913 ideal_cache:
4914                 block_group = btrfs_lookup_block_group(root->fs_info,
4915                                                        search_start);
4916                 /*
4917                  * we don't want to use the block group if it doesn't match our
4918                  * allocation bits, or if it's not cached.
4919                  *
4920                  * However if we are re-searching with an ideal block group
4921                  * picked out then we don't care that the block group is cached.
4922                  */
4923                 if (block_group && block_group_bits(block_group, data) &&
4924                     (block_group->cached != BTRFS_CACHE_NO ||
4925                      search_start == ideal_cache_offset)) {
4926                         down_read(&space_info->groups_sem);
4927                         if (list_empty(&block_group->list) ||
4928                             block_group->ro) {
4929                                 /*
4930                                  * someone is removing this block group,
4931                                  * we can't jump into the have_block_group
4932                                  * target because our list pointers are not
4933                                  * valid
4934                                  */
4935                                 btrfs_put_block_group(block_group);
4936                                 up_read(&space_info->groups_sem);
4937                         } else {
4938                                 index = get_block_group_index(block_group);
4939                                 goto have_block_group;
4940                         }
4941                 } else if (block_group) {
4942                         btrfs_put_block_group(block_group);
4943                 }
4944         }
4945 search:
4946         down_read(&space_info->groups_sem);
4947         list_for_each_entry(block_group, &space_info->block_groups[index],
4948                             list) {
4949                 u64 offset;
4950                 int cached;
4951
4952                 btrfs_get_block_group(block_group);
4953                 search_start = block_group->key.objectid;
4954
4955                 /*
4956                  * this can happen if we end up cycling through all the
4957                  * raid types, but we want to make sure we only allocate
4958                  * for the proper type.
4959                  */
4960                 if (!block_group_bits(block_group, data)) {
4961                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
4962                                     BTRFS_BLOCK_GROUP_RAID1 |
4963                                     BTRFS_BLOCK_GROUP_RAID10;
4964
4965                         /*
4966                          * if they asked for extra copies and this block group
4967                          * doesn't provide them, bail.  This does allow us to
4968                          * fill raid0 from raid1.
4969                          */
4970                         if ((data & extra) && !(block_group->flags & extra))
4971                                 goto loop;
4972                 }
4973
4974 have_block_group:
4975                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4976                         u64 free_percent;
4977
4978                         ret = cache_block_group(block_group, trans,
4979                                                 orig_root, 1);
4980                         if (block_group->cached == BTRFS_CACHE_FINISHED)
4981                                 goto have_block_group;
4982
4983                         free_percent = btrfs_block_group_used(&block_group->item);
4984                         free_percent *= 100;
4985                         free_percent = div64_u64(free_percent,
4986                                                  block_group->key.offset);
4987                         free_percent = 100 - free_percent;
4988                         if (free_percent > ideal_cache_percent &&
4989                             likely(!block_group->ro)) {
4990                                 ideal_cache_offset = block_group->key.objectid;
4991                                 ideal_cache_percent = free_percent;
4992                         }
4993
4994                         /*
4995                          * We only want to start kthread caching if we are at
4996                          * the point where we will wait for caching to make
4997                          * progress, or if our ideal search is over and we've
4998                          * found somebody to start caching.
4999                          */
5000                         if (loop > LOOP_CACHING_NOWAIT ||
5001                             (loop > LOOP_FIND_IDEAL &&
5002                              atomic_read(&space_info->caching_threads) < 2)) {
5003                                 ret = cache_block_group(block_group, trans,
5004                                                         orig_root, 0);
5005                                 BUG_ON(ret);
5006                         }
5007                         found_uncached_bg = true;
5008
5009                         /*
5010                          * If loop is set for cached only, try the next block
5011                          * group.
5012                          */
5013                         if (loop == LOOP_FIND_IDEAL)
5014                                 goto loop;
5015                 }
5016
5017                 cached = block_group_cache_done(block_group);
5018                 if (unlikely(!cached))
5019                         found_uncached_bg = true;
5020
5021                 if (unlikely(block_group->ro))
5022                         goto loop;
5023
5024                 spin_lock(&block_group->free_space_ctl->tree_lock);
5025                 if (cached &&
5026                     block_group->free_space_ctl->free_space <
5027                     num_bytes + empty_size) {
5028                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5029                         goto loop;
5030                 }
5031                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5032
5033                 /*
5034                  * OK, we want to try and use the cluster allocator, so let's look
5035                  * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
5036                  * have tried the cluster allocator plenty of times at this
5037                  * point and not have found anything, so we are likely way too
5038                  * fragmented for the clustering stuff to find anything, so let's
5039                  * just skip it and let the allocator find whatever block it can
5040                  * find
5041                  */
5042                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
5043                         /*
5044                          * the refill lock keeps out other
5045                          * people trying to start a new cluster
5046                          */
5047                         spin_lock(&last_ptr->refill_lock);
5048                         if (last_ptr->block_group &&
5049                             (last_ptr->block_group->ro ||
5050                             !block_group_bits(last_ptr->block_group, data))) {
5051                                 offset = 0;
5052                                 goto refill_cluster;
5053                         }
5054
5055                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5056                                                  num_bytes, search_start);
5057                         if (offset) {
5058                                 /* we have a block, we're done */
5059                                 spin_unlock(&last_ptr->refill_lock);
5060                                 goto checks;
5061                         }
5062
5063                         spin_lock(&last_ptr->lock);
5064                         /*
5065                          * whoops, this cluster doesn't actually point to
5066                          * this block group.  Get a ref on the block
5067                          * group it does point to and try again
5068                          */
5069                         if (!last_ptr_loop && last_ptr->block_group &&
5070                             last_ptr->block_group != block_group) {
5071
5072                                 btrfs_put_block_group(block_group);
5073                                 block_group = last_ptr->block_group;
5074                                 btrfs_get_block_group(block_group);
5075                                 spin_unlock(&last_ptr->lock);
5076                                 spin_unlock(&last_ptr->refill_lock);
5077
5078                                 last_ptr_loop = 1;
5079                                 search_start = block_group->key.objectid;
5080                                 /*
5081                                  * we know this block group is properly
5082                                  * in the list because
5083                                  * btrfs_remove_block_group drops the
5084                                  * cluster before it removes the block
5085                                  * group from the list
5086                                  */
5087                                 goto have_block_group;
5088                         }
5089                         spin_unlock(&last_ptr->lock);
5090 refill_cluster:
5091                         /*
5092                          * this cluster didn't work out, free it and
5093                          * start over
5094                          */
5095                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5096
5097                         last_ptr_loop = 0;
5098
5099                         /* allocate a cluster in this block group */
5100                         ret = btrfs_find_space_cluster(trans, root,
5101                                                block_group, last_ptr,
5102                                                offset, num_bytes,
5103                                                empty_cluster + empty_size);
5104                         if (ret == 0) {
5105                                 /*
5106                                  * now pull our allocation out of this
5107                                  * cluster
5108                                  */
5109                                 offset = btrfs_alloc_from_cluster(block_group,
5110                                                   last_ptr, num_bytes,
5111                                                   search_start);
5112                                 if (offset) {
5113                                         /* we found one, proceed */
5114                                         spin_unlock(&last_ptr->refill_lock);
5115                                         goto checks;
5116                                 }
5117                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
5118                                    !failed_cluster_refill) {
5119                                 spin_unlock(&last_ptr->refill_lock);
5120
5121                                 failed_cluster_refill = true;
5122                                 wait_block_group_cache_progress(block_group,
5123                                        num_bytes + empty_cluster + empty_size);
5124                                 goto have_block_group;
5125                         }
5126
5127                         /*
5128                          * at this point we either didn't find a cluster
5129                          * or we weren't able to allocate a block from our
5130                          * cluster.  Free the cluster we've been trying
5131                          * to use, and go to the next block group
5132                          */
5133                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5134                         spin_unlock(&last_ptr->refill_lock);
5135                         goto loop;
5136                 }
5137
5138                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5139                                                     num_bytes, empty_size);
5140                 /*
5141                  * If we didn't find a chunk, and we haven't failed on this
5142                  * block group before, and this block group is in the middle of
5143                  * caching and we are ok with waiting, then go ahead and wait
5144                  * for progress to be made, and set failed_alloc to true.
5145                  *
5146                  * If failed_alloc is true then we've already waited on this
5147                  * block group once and should move on to the next block group.
5148                  */
5149                 if (!offset && !failed_alloc && !cached &&
5150                     loop > LOOP_CACHING_NOWAIT) {
5151                         wait_block_group_cache_progress(block_group,
5152                                                 num_bytes + empty_size);
5153                         failed_alloc = true;
5154                         goto have_block_group;
5155                 } else if (!offset) {
5156                         goto loop;
5157                 }
5158 checks:
5159                 search_start = stripe_align(root, offset);
5160                 /* move on to the next group */
5161                 if (search_start + num_bytes >= search_end) {
5162                         btrfs_add_free_space(block_group, offset, num_bytes);
5163                         goto loop;
5164                 }
5165
5166                 /* move on to the next group */
5167                 if (search_start + num_bytes >
5168                     block_group->key.objectid + block_group->key.offset) {
5169                         btrfs_add_free_space(block_group, offset, num_bytes);
5170                         goto loop;
5171                 }
5172
5181                 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
5182                                             (data & BTRFS_BLOCK_GROUP_DATA));
5183                 if (ret == -EAGAIN) {
5184                         btrfs_add_free_space(block_group, offset, num_bytes);
5185                         goto loop;
5186                 }
5187
5188                 /* we are all good, let's return */
5189                 ins->objectid = search_start;
5190                 ins->offset = num_bytes;
5191
5192                 if (offset < search_start)
5193                         btrfs_add_free_space(block_group, offset,
5194                                              search_start - offset);
5195                 BUG_ON(offset > search_start);
5196                 btrfs_put_block_group(block_group);
5197                 break;
5198 loop:
5199                 failed_cluster_refill = false;
5200                 failed_alloc = false;
5201                 BUG_ON(index != get_block_group_index(block_group));
5202                 btrfs_put_block_group(block_group);
5203         }
5204         up_read(&space_info->groups_sem);
5205
5206         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5207                 goto search;
5208
5209         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5210          *                      for them to make caching progress.  Also
5211          *                      determine the best possible bg to cache
5212          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5213          *                      caching kthreads as we move along
5214          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5215          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5216          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5217          *                      again
5218          */
5219         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5220                 index = 0;
5221                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5222                         found_uncached_bg = false;
5223                         loop++;
5224                         if (!ideal_cache_percent &&
5225                             atomic_read(&space_info->caching_threads))
5226                                 goto search;
5227
5228                         /*
5229                          * 1 of the following 2 things has happened so far
5230                          *
5231                          * 1) We found an ideal block group for caching that
5232                          * is mostly full and will cache quickly, so we might
5233                          * as well wait for it.
5234                          *
5235                          * 2) We searched for cached only and we didn't find
5236                          * anything, and we didn't start any caching kthreads
5237                          * either, so chances are we will loop through and
5238                          * start a couple caching kthreads, and then come back
5239                          * around and just wait for them.  This will be slower
5240                          * because we will have 2 caching kthreads reading at
5241                          * the same time when we could have just started one
5242                          * and waited for it to get far enough to give us an
5243                          * allocation, so go ahead and go to the wait caching
5244                          * loop.
5245                          */
5246                         loop = LOOP_CACHING_WAIT;
5247                         search_start = ideal_cache_offset;
5248                         ideal_cache_percent = 0;
5249                         goto ideal_cache;
5250                 } else if (loop == LOOP_FIND_IDEAL) {
5251                         /*
5252                          * Didn't find an uncached bg, wait on anything we find
5253                          * next.
5254                          */
5255                         loop = LOOP_CACHING_WAIT;
5256                         goto search;
5257                 }
5258
5259                 loop++;
5260
5261                 if (loop == LOOP_ALLOC_CHUNK) {
5262                         if (allowed_chunk_alloc) {
5263                                 ret = do_chunk_alloc(trans, root, num_bytes +
5264                                                      2 * 1024 * 1024, data,
5265                                                      CHUNK_ALLOC_LIMITED);
5266                                 allowed_chunk_alloc = 0;
5267                                 if (ret == 1)
5268                                         done_chunk_alloc = 1;
5269                         } else if (!done_chunk_alloc &&
5270                                    space_info->force_alloc ==
5271                                    CHUNK_ALLOC_NO_FORCE) {
5272                                 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5273                         }
5274
5275                         /*
5276                          * We didn't allocate a chunk, go ahead and drop the
5277                          * empty size and loop again.
5278                          */
5279                         if (!done_chunk_alloc)
5280                                 loop = LOOP_NO_EMPTY_SIZE;
5281                 }
5282
5283                 if (loop == LOOP_NO_EMPTY_SIZE) {
5284                         empty_size = 0;
5285                         empty_cluster = 0;
5286                 }
5287
5288                 goto search;
5289         } else if (!ins->objectid) {
5290                 ret = -ENOSPC;
5291         } else {
5292                 ret = 0;
5293         }
5294
5295         return ret;
5296 }
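/*
 * Summary of the loop escalation above (sketch): a single call may walk
 * LOOP_FIND_IDEAL -> LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT ->
 * LOOP_ALLOC_CHUNK -> LOOP_NO_EMPTY_SIZE, getting less selective each
 * round: first only cached groups are considered, then caching kthreads
 * are kicked and waited on, then a chunk allocation is attempted, and
 * finally empty_size and empty_cluster are dropped to 0 before the
 * search gives up with -ENOSPC.
 */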
5297
5298 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5299                             int dump_block_groups)
5300 {
5301         struct btrfs_block_group_cache *cache;
5302         int index = 0;
5303
5304         spin_lock(&info->lock);
5305         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5306                (unsigned long long)(info->total_bytes - info->bytes_used -
5307                                     info->bytes_pinned - info->bytes_reserved -
5308                                     info->bytes_readonly),
5309                (info->full) ? "" : "not ");
5310         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5311                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5312                (unsigned long long)info->total_bytes,
5313                (unsigned long long)info->bytes_used,
5314                (unsigned long long)info->bytes_pinned,
5315                (unsigned long long)info->bytes_reserved,
5316                (unsigned long long)info->bytes_may_use,
5317                (unsigned long long)info->bytes_readonly);
5318         spin_unlock(&info->lock);
5319
5320         if (!dump_block_groups)
5321                 return;
5322
5323         down_read(&info->groups_sem);
5324 again:
5325         list_for_each_entry(cache, &info->block_groups[index], list) {
5326                 spin_lock(&cache->lock);
5327                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5328                        "%llu pinned %llu reserved\n",
5329                        (unsigned long long)cache->key.objectid,
5330                        (unsigned long long)cache->key.offset,
5331                        (unsigned long long)btrfs_block_group_used(&cache->item),
5332                        (unsigned long long)cache->pinned,
5333                        (unsigned long long)cache->reserved);
5334                 btrfs_dump_free_space(cache, bytes);
5335                 spin_unlock(&cache->lock);
5336         }
5337         if (++index < BTRFS_NR_RAID_TYPES)
5338                 goto again;
5339         up_read(&info->groups_sem);
5340 }
5341
5342 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5343                          struct btrfs_root *root,
5344                          u64 num_bytes, u64 min_alloc_size,
5345                          u64 empty_size, u64 hint_byte,
5346                          u64 search_end, struct btrfs_key *ins,
5347                          u64 data)
5348 {
5349         int ret;
5350         u64 search_start = 0;
5351
5352         data = btrfs_get_alloc_profile(root, data);
5353 again:
5354         /*
5355          * the only place that sets empty_size is btrfs_realloc_node, which
5356          * is not called recursively on allocations
5357          */
5358         if (empty_size || root->ref_cows)
5359                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5360                                      num_bytes + 2 * 1024 * 1024, data,
5361                                      CHUNK_ALLOC_NO_FORCE);
5362
5363         WARN_ON(num_bytes < root->sectorsize);
5364         ret = find_free_extent(trans, root, num_bytes, empty_size,
5365                                search_start, search_end, hint_byte,
5366                                ins, data);
5367
5368         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5369                 num_bytes = num_bytes >> 1;
5370                 num_bytes = num_bytes & ~(root->sectorsize - 1);
5371                 num_bytes = max(num_bytes, min_alloc_size);
5372                 do_chunk_alloc(trans, root->fs_info->extent_root,
5373                                num_bytes, data, CHUNK_ALLOC_FORCE);
5374                 goto again;
5375         }
5376         if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5377                 struct btrfs_space_info *sinfo;
5378
5379                 sinfo = __find_space_info(root->fs_info, data);
5380                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5381                        "wanted %llu\n", (unsigned long long)data,
5382                        (unsigned long long)num_bytes);
5383                 dump_space_info(sinfo, num_bytes, 1);
5384         }
5385
5386         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5387
5388         return ret;
5389 }
5390
5391 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5392 {
5393         struct btrfs_block_group_cache *cache;
5394         int ret = 0;
5395
5396         cache = btrfs_lookup_block_group(root->fs_info, start);
5397         if (!cache) {
5398                 printk(KERN_ERR "Unable to find block group for %llu\n",
5399                        (unsigned long long)start);
5400                 return -ENOSPC;
5401         }
5402
5403         if (btrfs_test_opt(root, DISCARD))
5404                 ret = btrfs_discard_extent(root, start, len, NULL);
5405
5406         btrfs_add_free_space(cache, start, len);
5407         btrfs_update_reserved_bytes(cache, len, 0, 1);
5408         btrfs_put_block_group(cache);
5409
5410         trace_btrfs_reserved_extent_free(root, start, len);
5411
5412         return ret;
5413 }
5414
5415 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5416                                       struct btrfs_root *root,
5417                                       u64 parent, u64 root_objectid,
5418                                       u64 flags, u64 owner, u64 offset,
5419                                       struct btrfs_key *ins, int ref_mod)
5420 {
5421         int ret;
5422         struct btrfs_fs_info *fs_info = root->fs_info;
5423         struct btrfs_extent_item *extent_item;
5424         struct btrfs_extent_inline_ref *iref;
5425         struct btrfs_path *path;
5426         struct extent_buffer *leaf;
5427         int type;
5428         u32 size;
5429
5430         if (parent > 0)
5431                 type = BTRFS_SHARED_DATA_REF_KEY;
5432         else
5433                 type = BTRFS_EXTENT_DATA_REF_KEY;
5434
5435         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5436
5437         path = btrfs_alloc_path();
5438         if (!path)
5439                 return -ENOMEM;
5440
5441         path->leave_spinning = 1;
5442         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5443                                       ins, size);
5444         BUG_ON(ret);
5445
5446         leaf = path->nodes[0];
5447         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5448                                      struct btrfs_extent_item);
5449         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5450         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5451         btrfs_set_extent_flags(leaf, extent_item,
5452                                flags | BTRFS_EXTENT_FLAG_DATA);
5453
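             /*
              * an inline ref immediately follows the extent item: shared
              * refs are keyed by the parent block, normal refs carry the
              * owning root/objectid/offset triple.
              */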
5454         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5455         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5456         if (parent > 0) {
5457                 struct btrfs_shared_data_ref *ref;
5458                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5459                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5460                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5461         } else {
5462                 struct btrfs_extent_data_ref *ref;
5463                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5464                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5465                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5466                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5467                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5468         }
5469
5470         btrfs_mark_buffer_dirty(path->nodes[0]);
5471         btrfs_free_path(path);
5472
5473         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5474         if (ret) {
5475                 printk(KERN_ERR "btrfs update block group failed for %llu "
5476                        "%llu\n", (unsigned long long)ins->objectid,
5477                        (unsigned long long)ins->offset);
5478                 BUG();
5479         }
5480         return ret;
5481 }
5482
5483 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5484                                      struct btrfs_root *root,
5485                                      u64 parent, u64 root_objectid,
5486                                      u64 flags, struct btrfs_disk_key *key,
5487                                      int level, struct btrfs_key *ins)
5488 {
5489         int ret;
5490         struct btrfs_fs_info *fs_info = root->fs_info;
5491         struct btrfs_extent_item *extent_item;
5492         struct btrfs_tree_block_info *block_info;
5493         struct btrfs_extent_inline_ref *iref;
5494         struct btrfs_path *path;
5495         struct extent_buffer *leaf;
5496         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5497
5498         path = btrfs_alloc_path();
5499         if (!path)
5500                 return -ENOMEM;
5501
5502         path->leave_spinning = 1;
5503         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5504                                       ins, size);
5505         BUG_ON(ret);
5506
5507         leaf = path->nodes[0];
5508         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5509                                      struct btrfs_extent_item);
5510         btrfs_set_extent_refs(leaf, extent_item, 1);
5511         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5512         btrfs_set_extent_flags(leaf, extent_item,
5513                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5514         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5515
5516         btrfs_set_tree_block_key(leaf, block_info, key);
5517         btrfs_set_tree_block_level(leaf, block_info, level);
5518
5519         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5520         if (parent > 0) {
5521                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5522                 btrfs_set_extent_inline_ref_type(leaf, iref,
5523                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5524                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5525         } else {
5526                 btrfs_set_extent_inline_ref_type(leaf, iref,
5527                                                  BTRFS_TREE_BLOCK_REF_KEY);
5528                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5529         }
5530
5531         btrfs_mark_buffer_dirty(leaf);
5532         btrfs_free_path(path);
5533
5534         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5535         if (ret) {
5536                 printk(KERN_ERR "btrfs update block group failed for %llu "
5537                        "%llu\n", (unsigned long long)ins->objectid,
5538                        (unsigned long long)ins->offset);
5539                 BUG();
5540         }
5541         return ret;
5542 }
5543
5544 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5545                                      struct btrfs_root *root,
5546                                      u64 root_objectid, u64 owner,
5547                                      u64 offset, struct btrfs_key *ins)
5548 {
5549         int ret;
5550
5551         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5552
5553         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5554                                          0, root_objectid, owner, offset,
5555                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
5556         return ret;
5557 }
5558
5559 /*
5560  * this is used by the tree logging recovery code.  It records that
5561  * an extent has been allocated and makes sure to clear the free
5562  * space cache bits as well
5563  */
5564 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5565                                    struct btrfs_root *root,
5566                                    u64 root_objectid, u64 owner, u64 offset,
5567                                    struct btrfs_key *ins)
5568 {
5569         int ret;
5570         struct btrfs_block_group_cache *block_group;
5571         struct btrfs_caching_control *caching_ctl;
5572         u64 start = ins->objectid;
5573         u64 num_bytes = ins->offset;
5574
5575         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5576         cache_block_group(block_group, trans, NULL, 0);
5577         caching_ctl = get_caching_control(block_group);
5578
5579         if (!caching_ctl) {
5580                 BUG_ON(!block_group_cache_done(block_group));
5581                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5582                 BUG_ON(ret);
5583         } else {
5584                 mutex_lock(&caching_ctl->mutex);
5585
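                     /*
                      * the group is still caching: exclude whatever part of
                      * the range the cacher hasn't reached yet and remove
                      * the part already added to the free space cache.
                      */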
5586                 if (start >= caching_ctl->progress) {
5587                         ret = add_excluded_extent(root, start, num_bytes);
5588                         BUG_ON(ret);
5589                 } else if (start + num_bytes <= caching_ctl->progress) {
5590                         ret = btrfs_remove_free_space(block_group,
5591                                                       start, num_bytes);
5592                         BUG_ON(ret);
5593                 } else {
5594                         num_bytes = caching_ctl->progress - start;
5595                         ret = btrfs_remove_free_space(block_group,
5596                                                       start, num_bytes);
5597                         BUG_ON(ret);
5598
5599                         start = caching_ctl->progress;
5600                         num_bytes = ins->objectid + ins->offset -
5601                                     caching_ctl->progress;
5602                         ret = add_excluded_extent(root, start, num_bytes);
5603                         BUG_ON(ret);
5604                 }
5605
5606                 mutex_unlock(&caching_ctl->mutex);
5607                 put_caching_control(caching_ctl);
5608         }
5609
5610         ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
5611         BUG_ON(ret);
5612         btrfs_put_block_group(block_group);
5613         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5614                                          0, owner, offset, ins, 1);
5615         return ret;
5616 }
5617
5618 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5619                                             struct btrfs_root *root,
5620                                             u64 bytenr, u32 blocksize,
5621                                             int level)
5622 {
5623         struct extent_buffer *buf;
5624
5625         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5626         if (!buf)
5627                 return ERR_PTR(-ENOMEM);
5628         btrfs_set_header_generation(buf, trans->transid);
5629         btrfs_set_buffer_lockdep_class(buf, level);
5630         btrfs_tree_lock(buf);
5631         clean_tree_block(trans, root, buf);
5632
5633         btrfs_set_lock_blocking(buf);
5634         btrfs_set_buffer_uptodate(buf);
5635
5636         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5637                 /*
5638                  * we allow two log transactions at a time, so use different
5639                  * EXTENT bits to differentiate dirty pages.
5640                  */
5641                 if (root->log_transid % 2 == 0)
5642                         set_extent_dirty(&root->dirty_log_pages, buf->start,
5643                                         buf->start + buf->len - 1, GFP_NOFS);
5644                 else
5645                         set_extent_new(&root->dirty_log_pages, buf->start,
5646                                         buf->start + buf->len - 1, GFP_NOFS);
5647         } else {
5648                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5649                          buf->start + buf->len - 1, GFP_NOFS);
5650         }
5651         trans->blocks_used++;
5652         /* this returns a buffer locked for blocking */
5653         return buf;
5654 }
5655
5656 static struct btrfs_block_rsv *
5657 use_block_rsv(struct btrfs_trans_handle *trans,
5658               struct btrfs_root *root, u32 blocksize)
5659 {
5660         struct btrfs_block_rsv *block_rsv;
5661         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5662         int ret;
5663
5664         block_rsv = get_block_rsv(trans, root);
5665
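             /*
              * a zero-size rsv has nothing pre-reserved for us, so try to
              * reserve the metadata bytes directly.
              */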
5666         if (block_rsv->size == 0) {
5667                 ret = reserve_metadata_bytes(trans, root, block_rsv,
5668                                              blocksize, 0);
5669                 /*
5670                  * If we couldn't reserve metadata bytes try and use some from
5671                  * the global reserve.
5672                  */
5673                 if (ret && block_rsv != global_rsv) {
5674                         ret = block_rsv_use_bytes(global_rsv, blocksize);
5675                         if (!ret)
5676                                 return global_rsv;
5677                         return ERR_PTR(ret);
5678                 } else if (ret) {
5679                         return ERR_PTR(ret);
5680                 }
5681                 return block_rsv;
5682         }
5683
5684         ret = block_rsv_use_bytes(block_rsv, blocksize);
5685         if (!ret)
5686                 return block_rsv;

5688         WARN_ON(1);
5689         ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
5690                                      0);
5691         if (!ret) {
5692                 spin_lock(&block_rsv->lock);
5693                 block_rsv->size += blocksize;
5694                 spin_unlock(&block_rsv->lock);
5695                 return block_rsv;
5696         } else if (block_rsv != global_rsv) {
5697                 ret = block_rsv_use_bytes(global_rsv, blocksize);
5698                 if (!ret)
5699                         return global_rsv;
5700         }
5702
5703         return ERR_PTR(-ENOSPC);
5704 }
5705
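     /*
      * return an unused reservation: hand the bytes back to the rsv, then
      * release anything reserved above the rsv's target size.
      */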
5706 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5707 {
5708         block_rsv_add_bytes(block_rsv, blocksize, 0);
5709         block_rsv_release_bytes(block_rsv, NULL, 0);
5710 }
5711
5712 /*
5713  * finds a free extent and does all the dirty work required for allocation:
5714  * it reserves the extent, queues any needed delayed ref and initializes
5715  * a tree buffer for the first block of the extent.
5716  *
5717  * returns the locked tree buffer, or an ERR_PTR on failure.
5718  */
5719 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5720                                         struct btrfs_root *root, u32 blocksize,
5721                                         u64 parent, u64 root_objectid,
5722                                         struct btrfs_disk_key *key, int level,
5723                                         u64 hint, u64 empty_size)
5724 {
5725         struct btrfs_key ins;
5726         struct btrfs_block_rsv *block_rsv;
5727         struct extent_buffer *buf;
5728         u64 flags = 0;
5729         int ret;
5730
5731
5732         block_rsv = use_block_rsv(trans, root, blocksize);
5733         if (IS_ERR(block_rsv))
5734                 return ERR_CAST(block_rsv);
5735
5736         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5737                                    empty_size, hint, (u64)-1, &ins, 0);
5738         if (ret) {
5739                 unuse_block_rsv(block_rsv, blocksize);
5740                 return ERR_PTR(ret);
5741         }
5742
5743         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5744                                     blocksize, level);
5745         BUG_ON(IS_ERR(buf));
5746
5747         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5748                 if (parent == 0)
5749                         parent = ins.objectid;
5750                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5751         } else
5752                 BUG_ON(parent > 0);
5753
5754         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5755                 struct btrfs_delayed_extent_op *extent_op;
5756                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5757                 BUG_ON(!extent_op);
5758                 if (key)
5759                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
5760                 else
5761                         memset(&extent_op->key, 0, sizeof(extent_op->key));
5762                 extent_op->flags_to_set = flags;
5763                 extent_op->update_key = 1;
5764                 extent_op->update_flags = 1;
5765                 extent_op->is_data = 0;
5766
5767                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5768                                         ins.offset, parent, root_objectid,
5769                                         level, BTRFS_ADD_DELAYED_EXTENT,
5770                                         extent_op);
5771                 BUG_ON(ret);
5772         }
5773         return buf;
5774 }
5775
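     /*
      * walk_control carries the state for walking a (sub)tree: refs[] and
      * flags[] cache the reference count and flags seen at each level,
      * stage is DROP_REFERENCE or UPDATE_BACKREF, shared_level is the
      * level where a shared block moved us to UPDATE_BACKREF, and the
      * reada fields track the readahead window used by reada_walk_down.
      */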
5776 struct walk_control {
5777         u64 refs[BTRFS_MAX_LEVEL];
5778         u64 flags[BTRFS_MAX_LEVEL];
5779         struct btrfs_key update_progress;
5780         int stage;
5781         int level;
5782         int shared_level;
5783         int update_ref;
5784         int keep_locks;
5785         int reada_slot;
5786         int reada_count;
5787 };
5788
5789 #define DROP_REFERENCE  1
5790 #define UPDATE_BACKREF  2
5791
5792 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5793                                      struct btrfs_root *root,
5794                                      struct walk_control *wc,
5795                                      struct btrfs_path *path)
5796 {
5797         u64 bytenr;
5798         u64 generation;
5799         u64 refs;
5800         u64 flags;
5801         u32 nritems;
5802         u32 blocksize;
5803         struct btrfs_key key;
5804         struct extent_buffer *eb;
5805         int ret;
5806         int slot;
5807         int nread = 0;
5808
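             /*
              * scale the readahead window down while we are still inside
              * the previous window, otherwise scale it up, capped at one
              * full node.
              */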
5809         if (path->slots[wc->level] < wc->reada_slot) {
5810                 wc->reada_count = wc->reada_count * 2 / 3;
5811                 wc->reada_count = max(wc->reada_count, 2);
5812         } else {
5813                 wc->reada_count = wc->reada_count * 3 / 2;
5814                 wc->reada_count = min_t(int, wc->reada_count,
5815                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5816         }
5817
5818         eb = path->nodes[wc->level];
5819         nritems = btrfs_header_nritems(eb);
5820         blocksize = btrfs_level_size(root, wc->level - 1);
5821
5822         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5823                 if (nread >= wc->reada_count)
5824                         break;
5825
5826                 cond_resched();
5827                 bytenr = btrfs_node_blockptr(eb, slot);
5828                 generation = btrfs_node_ptr_generation(eb, slot);
5829
5830                 if (slot == path->slots[wc->level])
5831                         goto reada;
5832
5833                 if (wc->stage == UPDATE_BACKREF &&
5834                     generation <= root->root_key.offset)
5835                         continue;
5836
5837                 /* We don't lock the tree block, it's OK to be racy here */
5838                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5839                                                &refs, &flags);
5840                 BUG_ON(ret);
5841                 BUG_ON(refs == 0);
5842
5843                 if (wc->stage == DROP_REFERENCE) {
5844                         if (refs == 1)
5845                                 goto reada;
5846
5847                         if (wc->level == 1 &&
5848                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5849                                 continue;
5850                         if (!wc->update_ref ||
5851                             generation <= root->root_key.offset)
5852                                 continue;
5853                         btrfs_node_key_to_cpu(eb, &key, slot);
5854                         ret = btrfs_comp_cpu_keys(&key,
5855                                                   &wc->update_progress);
5856                         if (ret < 0)
5857                                 continue;
5858                 } else {
5859                         if (wc->level == 1 &&
5860                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5861                                 continue;
5862                 }
5863 reada:
5864                 ret = readahead_tree_block(root, bytenr, blocksize,
5865                                            generation);
5866                 if (ret)
5867                         break;
5868                 nread++;
5869         }
5870         wc->reada_slot = slot;
5871 }
5872
5873 /*
5874  * helper to process tree block while walking down the tree.
5875  *
5876  * when wc->stage == UPDATE_BACKREF, this function updates
5877  * back refs for pointers in the block.
5878  *
5879  * NOTE: return value 1 means we should stop walking down.
5880  */
5881 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5882                                    struct btrfs_root *root,
5883                                    struct btrfs_path *path,
5884                                    struct walk_control *wc, int lookup_info)
5885 {
5886         int level = wc->level;
5887         struct extent_buffer *eb = path->nodes[level];
5888         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5889         int ret;
5890
5891         if (wc->stage == UPDATE_BACKREF &&
5892             btrfs_header_owner(eb) != root->root_key.objectid)
5893                 return 1;
5894
5895         /*
5896          * when the reference count of a tree block is 1, it won't increase
5897          * again. once the full backref flag is set, we never clear it.
5898          */
5899         if (lookup_info &&
5900             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5901              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5902                 BUG_ON(!path->locks[level]);
5903                 ret = btrfs_lookup_extent_info(trans, root,
5904                                                eb->start, eb->len,
5905                                                &wc->refs[level],
5906                                                &wc->flags[level]);
5907                 BUG_ON(ret);
5908                 BUG_ON(wc->refs[level] == 0);
5909         }
5910
5911         if (wc->stage == DROP_REFERENCE) {
5912                 if (wc->refs[level] > 1)
5913                         return 1;
5914
5915                 if (path->locks[level] && !wc->keep_locks) {
5916                         btrfs_tree_unlock(eb);
5917                         path->locks[level] = 0;
5918                 }
5919                 return 0;
5920         }
5921
5922         /* wc->stage == UPDATE_BACKREF */
5923         if (!(wc->flags[level] & flag)) {
5924                 BUG_ON(!path->locks[level]);
5925                 ret = btrfs_inc_ref(trans, root, eb, 1);
5926                 BUG_ON(ret);
5927                 ret = btrfs_dec_ref(trans, root, eb, 0);
5928                 BUG_ON(ret);
5929                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5930                                                   eb->len, flag, 0);
5931                 BUG_ON(ret);
5932                 wc->flags[level] |= flag;
5933         }
5934
5935         /*
5936          * the block is shared by multiple trees, so it's not good to
5937          * keep the tree lock
5938          */
5939         if (path->locks[level] && level > 0) {
5940                 btrfs_tree_unlock(eb);
5941                 path->locks[level] = 0;
5942         }
5943         return 0;
5944 }
5945
5946 /*
5947  * helper to process a tree block pointer.
5948  *
5949  * when wc->stage == DROP_REFERENCE, this function checks the
5950  * reference count of the block pointed to. if the block
5951  * is shared and we need to update back refs for the subtree
5952  * rooted at the block, this function changes wc->stage to
5953  * UPDATE_BACKREF. if the block is shared and there is no
5954  * need to update backrefs, this function drops the reference
5955  * to the block.
5956  *
5957  * NOTE: return value 1 means we should stop walking down.
5958  */
5959 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5960                                  struct btrfs_root *root,
5961                                  struct btrfs_path *path,
5962                                  struct walk_control *wc, int *lookup_info)
5963 {
5964         u64 bytenr;
5965         u64 generation;
5966         u64 parent;
5967         u32 blocksize;
5968         struct btrfs_key key;
5969         struct extent_buffer *next;
5970         int level = wc->level;
5971         int reada = 0;
5972         int ret = 0;
5973
5974         generation = btrfs_node_ptr_generation(path->nodes[level],
5975                                                path->slots[level]);
5976         /*
5977          * if the lower level block was created before the snapshot
5978          * was created, we know there is no need to update back refs
5979          * for the subtree
5980          */
5981         if (wc->stage == UPDATE_BACKREF &&
5982             generation <= root->root_key.offset) {
5983                 *lookup_info = 1;
5984                 return 1;
5985         }
5986
5987         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5988         blocksize = btrfs_level_size(root, level - 1);
5989
5990         next = btrfs_find_tree_block(root, bytenr, blocksize);
5991         if (!next) {
5992                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5993                 if (!next)
5994                         return -ENOMEM;
5995                 reada = 1;
5996         }
5997         btrfs_tree_lock(next);
5998         btrfs_set_lock_blocking(next);
5999
6000         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6001                                        &wc->refs[level - 1],
6002                                        &wc->flags[level - 1]);
6003         BUG_ON(ret);
6004         BUG_ON(wc->refs[level - 1] == 0);
6005         *lookup_info = 0;
6006
6007         if (wc->stage == DROP_REFERENCE) {
6008                 if (wc->refs[level - 1] > 1) {
6009                         if (level == 1 &&
6010                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6011                                 goto skip;
6012
6013                         if (!wc->update_ref ||
6014                             generation <= root->root_key.offset)
6015                                 goto skip;
6016
6017                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6018                                               path->slots[level]);
6019                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6020                         if (ret < 0)
6021                                 goto skip;
6022
6023                         wc->stage = UPDATE_BACKREF;
6024                         wc->shared_level = level - 1;
6025                 }
6026         } else {
6027                 if (level == 1 &&
6028                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6029                         goto skip;
6030         }
6031
6032         if (!btrfs_buffer_uptodate(next, generation)) {
6033                 btrfs_tree_unlock(next);
6034                 free_extent_buffer(next);
6035                 next = NULL;
6036                 *lookup_info = 1;
6037         }
6038
6039         if (!next) {
6040                 if (reada && level == 1)
6041                         reada_walk_down(trans, root, wc, path);
6042                 next = read_tree_block(root, bytenr, blocksize, generation);
6043                 if (!next)
6044                         return -EIO;
6045                 btrfs_tree_lock(next);
6046                 btrfs_set_lock_blocking(next);
6047         }
6048
6049         level--;
6050         BUG_ON(level != btrfs_header_level(next));
6051         path->nodes[level] = next;
6052         path->slots[level] = 0;
6053         path->locks[level] = 1;
6054         wc->level = level;
6055         if (wc->level == 1)
6056                 wc->reada_slot = 0;
6057         return 0;
6058 skip:
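             /*
              * we're not going to descend into this block; in the drop
              * stage that means dropping our single reference on it here.
              */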
6059         wc->refs[level - 1] = 0;
6060         wc->flags[level - 1] = 0;
6061         if (wc->stage == DROP_REFERENCE) {
6062                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6063                         parent = path->nodes[level]->start;
6064                 } else {
6065                         BUG_ON(root->root_key.objectid !=
6066                                btrfs_header_owner(path->nodes[level]));
6067                         parent = 0;
6068                 }
6069
6070                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6071                                         root->root_key.objectid, level - 1, 0);
6072                 BUG_ON(ret);
6073         }
6074         btrfs_tree_unlock(next);
6075         free_extent_buffer(next);
6076         *lookup_info = 1;
6077         return 1;
6078 }
6079
6080 /*
6081  * helper to process tree block while walking up the tree.
6082  *
6083  * when wc->stage == DROP_REFERENCE, this function drops
6084  * reference count on the block.
6085  *
6086  * when wc->stage == UPDATE_BACKREF, this function changes
6087  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6088  * to UPDATE_BACKREF previously while processing the block.
6089  *
6090  * NOTE: return value 1 means we should stop walking up.
6091  */
6092 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6093                                  struct btrfs_root *root,
6094                                  struct btrfs_path *path,
6095                                  struct walk_control *wc)
6096 {
6097         int ret;
6098         int level = wc->level;
6099         struct extent_buffer *eb = path->nodes[level];
6100         u64 parent = 0;
6101
6102         if (wc->stage == UPDATE_BACKREF) {
6103                 BUG_ON(wc->shared_level < level);
6104                 if (level < wc->shared_level)
6105                         goto out;
6106
6107                 ret = find_next_key(path, level + 1, &wc->update_progress);
6108                 if (ret > 0)
6109                         wc->update_ref = 0;
6110
6111                 wc->stage = DROP_REFERENCE;
6112                 wc->shared_level = -1;
6113                 path->slots[level] = 0;
6114
6115                 /*
6116                  * check reference count again if the block isn't locked.
6117                  * we should start walking down the tree again if reference
6118                  * count is one.
6119                  */
6120                 if (!path->locks[level]) {
6121                         BUG_ON(level == 0);
6122                         btrfs_tree_lock(eb);
6123                         btrfs_set_lock_blocking(eb);
6124                         path->locks[level] = 1;
6125
6126                         ret = btrfs_lookup_extent_info(trans, root,
6127                                                        eb->start, eb->len,
6128                                                        &wc->refs[level],
6129                                                        &wc->flags[level]);
6130                         BUG_ON(ret);
6131                         BUG_ON(wc->refs[level] == 0);
6132                         if (wc->refs[level] == 1) {
6133                                 btrfs_tree_unlock(eb);
6134                                 path->locks[level] = 0;
6135                                 return 1;
6136                         }
6137                 }
6138         }
6139
6140         /* wc->stage == DROP_REFERENCE */
6141         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6142
6143         if (wc->refs[level] == 1) {
6144                 if (level == 0) {
6145                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6146                                 ret = btrfs_dec_ref(trans, root, eb, 1);
6147                         else
6148                                 ret = btrfs_dec_ref(trans, root, eb, 0);
6149                         BUG_ON(ret);
6150                 }
6151                 /* make block locked assertion in clean_tree_block happy */
6152                 if (!path->locks[level] &&
6153                     btrfs_header_generation(eb) == trans->transid) {
6154                         btrfs_tree_lock(eb);
6155                         btrfs_set_lock_blocking(eb);
6156                         path->locks[level] = 1;
6157                 }
6158                 clean_tree_block(trans, root, eb);
6159         }
6160
6161         if (eb == root->node) {
6162                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6163                         parent = eb->start;
6164                 else
6165                         BUG_ON(root->root_key.objectid !=
6166                                btrfs_header_owner(eb));
6167         } else {
6168                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6169                         parent = path->nodes[level + 1]->start;
6170                 else
6171                         BUG_ON(root->root_key.objectid !=
6172                                btrfs_header_owner(path->nodes[level + 1]));
6173         }
6174
6175         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6176 out:
6177         wc->refs[level] = 0;
6178         wc->flags[level] = 0;
6179         return 0;
6180 }
6181
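     /*
      * walk down as far as we can, stopping at a leaf or at a block that
      * shouldn't be descended into; returns a negative errno on failure.
      */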
6182 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6183                                    struct btrfs_root *root,
6184                                    struct btrfs_path *path,
6185                                    struct walk_control *wc)
6186 {
6187         int level = wc->level;
6188         int lookup_info = 1;
6189         int ret;
6190
6191         while (level >= 0) {
6192                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6193                 if (ret > 0)
6194                         break;
6195
6196                 if (level == 0)
6197                         break;
6198
6199                 if (path->slots[level] >=
6200                     btrfs_header_nritems(path->nodes[level]))
6201                         break;
6202
6203                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6204                 if (ret > 0) {
6205                         path->slots[level]++;
6206                         continue;
6207                 } else if (ret < 0)
6208                         return ret;
6209                 level = wc->level;
6210         }
6211         return 0;
6212 }
6213
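     /*
      * walk back up until we find a slot we haven't processed yet; returns
      * 0 if the walk should continue downward again, 1 if the tree is done.
      */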
6214 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6215                                  struct btrfs_root *root,
6216                                  struct btrfs_path *path,
6217                                  struct walk_control *wc, int max_level)
6218 {
6219         int level = wc->level;
6220         int ret;
6221
6222         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6223         while (level < max_level && path->nodes[level]) {
6224                 wc->level = level;
6225                 if (path->slots[level] + 1 <
6226                     btrfs_header_nritems(path->nodes[level])) {
6227                         path->slots[level]++;
6228                         return 0;
6229                 } else {
6230                         ret = walk_up_proc(trans, root, path, wc);
6231                         if (ret > 0)
6232                                 return 0;
6233
6234                         if (path->locks[level]) {
6235                                 btrfs_tree_unlock(path->nodes[level]);
6236                                 path->locks[level] = 0;
6237                         }
6238                         free_extent_buffer(path->nodes[level]);
6239                         path->nodes[level] = NULL;
6240                         level++;
6241                 }
6242         }
6243         return 1;
6244 }
6245
6246 /*
6247  * drop a subvolume tree.
6248  *
6249  * this function traverses the tree freeing any blocks that are only
6250  * referenced by the tree.
6251  *
6252  * when a shared tree block is found, this function decreases its
6253  * reference count by one. if update_ref is true, this function
6254  * also makes sure backrefs for the shared block and all lower level
6255  * blocks are properly updated.
6256  */
6257 int btrfs_drop_snapshot(struct btrfs_root *root,
6258                         struct btrfs_block_rsv *block_rsv, int update_ref)
6259 {
6260         struct btrfs_path *path;
6261         struct btrfs_trans_handle *trans;
6262         struct btrfs_root *tree_root = root->fs_info->tree_root;
6263         struct btrfs_root_item *root_item = &root->root_item;
6264         struct walk_control *wc;
6265         struct btrfs_key key;
6266         int err = 0;
6267         int ret;
6268         int level;
6269
6270         path = btrfs_alloc_path();
6271         if (!path) {
                     err = -ENOMEM;
                     goto out;
             }
6272 
6273         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6274         if (!wc) {
                     btrfs_free_path(path);
                     err = -ENOMEM;
                     goto out;
             }
6275
6276         trans = btrfs_start_transaction(tree_root, 0);
6277         BUG_ON(IS_ERR(trans));
6278
6279         if (block_rsv)
6280                 trans->block_rsv = block_rsv;
6281
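             /*
              * a zero drop_progress key means this is a fresh drop;
              * otherwise resume from where a previous transaction left off.
              */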
6282         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6283                 level = btrfs_header_level(root->node);
6284                 path->nodes[level] = btrfs_lock_root_node(root);
6285                 btrfs_set_lock_blocking(path->nodes[level]);
6286                 path->slots[level] = 0;
6287                 path->locks[level] = 1;
6288                 memset(&wc->update_progress, 0,
6289                        sizeof(wc->update_progress));
6290         } else {
6291                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6292                 memcpy(&wc->update_progress, &key,
6293                        sizeof(wc->update_progress));
6294
6295                 level = root_item->drop_level;
6296                 BUG_ON(level == 0);
6297                 path->lowest_level = level;
6298                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6299                 path->lowest_level = 0;
6300                 if (ret < 0) {
6301                         err = ret;
6302                         goto out_free;
6303                 }
6304                 WARN_ON(ret > 0);
6305
6306                 /*
6307                  * unlock our path, this is safe because only this
6308                  * function is allowed to delete this snapshot
6309                  */
6310                 btrfs_unlock_up_safe(path, 0);
6311
6312                 level = btrfs_header_level(root->node);
6313                 while (1) {
6314                         btrfs_tree_lock(path->nodes[level]);
6315                         btrfs_set_lock_blocking(path->nodes[level]);
6316
6317                         ret = btrfs_lookup_extent_info(trans, root,
6318                                                 path->nodes[level]->start,
6319                                                 path->nodes[level]->len,
6320                                                 &wc->refs[level],
6321                                                 &wc->flags[level]);
6322                         BUG_ON(ret);
6323                         BUG_ON(wc->refs[level] == 0);
6324
6325                         if (level == root_item->drop_level)
6326                                 break;
6327
6328                         btrfs_tree_unlock(path->nodes[level]);
6329                         WARN_ON(wc->refs[level] != 1);
6330                         level--;
6331                 }
6332         }
6333
6334         wc->level = level;
6335         wc->shared_level = -1;
6336         wc->stage = DROP_REFERENCE;
6337         wc->update_ref = update_ref;
6338         wc->keep_locks = 0;
6339         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6340
6341         while (1) {
6342                 ret = walk_down_tree(trans, root, path, wc);
6343                 if (ret < 0) {
6344                         err = ret;
6345                         break;
6346                 }
6347
6348                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6349                 if (ret < 0) {
6350                         err = ret;
6351                         break;
6352                 }
6353
6354                 if (ret > 0) {
6355                         BUG_ON(wc->stage != DROP_REFERENCE);
6356                         break;
6357                 }
6358
6359                 if (wc->stage == DROP_REFERENCE) {
6360                         level = wc->level;
6361                         btrfs_node_key(path->nodes[level],
6362                                        &root_item->drop_progress,
6363                                        path->slots[level]);
6364                         root_item->drop_level = level;
6365                 }
6366
6367                 BUG_ON(wc->level == 0);
6368                 if (btrfs_should_end_transaction(trans, tree_root)) {
6369                         ret = btrfs_update_root(trans, tree_root,
6370                                                 &root->root_key,
6371                                                 root_item);
6372                         BUG_ON(ret);
6373
6374                         btrfs_end_transaction_throttle(trans, tree_root);
6375                         trans = btrfs_start_transaction(tree_root, 0);
6376                         BUG_ON(IS_ERR(trans));
6377                         if (block_rsv)
6378                                 trans->block_rsv = block_rsv;
6379                 }
6380         }
6381         btrfs_release_path(path);
6382         BUG_ON(err);
6383
6384         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6385         BUG_ON(ret);
6386
6387         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6388                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6389                                            NULL, NULL);
6390                 BUG_ON(ret < 0);
6391                 if (ret > 0) {
6392                         /* if we fail to delete the orphan item this time
6393                          * around, it'll get picked up the next time.
6394                          *
6395                          * The most common failure here is just -ENOENT.
6396                          */
6397                         btrfs_del_orphan_item(trans, tree_root,
6398                                               root->root_key.objectid);
6399                 }
6400         }
6401
6402         if (root->in_radix) {
6403                 btrfs_free_fs_root(tree_root->fs_info, root);
6404         } else {
6405                 free_extent_buffer(root->node);
6406                 free_extent_buffer(root->commit_root);
6407                 kfree(root);
6408         }
6409 out_free:
6410         btrfs_end_transaction_throttle(trans, tree_root);
6411         kfree(wc);
6412         btrfs_free_path(path);
     out:
6413         return err;
6414 }
6415
6416 /*
6417  * drop subtree rooted at tree block 'node'.
6418  *
6419  * NOTE: this function will unlock and release tree block 'node'
6420  */
6421 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6422                         struct btrfs_root *root,
6423                         struct extent_buffer *node,
6424                         struct extent_buffer *parent)
6425 {
6426         struct btrfs_path *path;
6427         struct walk_control *wc;
6428         int level;
6429         int parent_level;
6430         int ret = 0;
6431         int wret;
6432
6433         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6434
6435         path = btrfs_alloc_path();
6436         if (!path)
6437                 return -ENOMEM;
6438
6439         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6440         if (!wc) {
6441                 btrfs_free_path(path);
6442                 return -ENOMEM;
6443         }
6444
6445         btrfs_assert_tree_locked(parent);
6446         parent_level = btrfs_header_level(parent);
6447         extent_buffer_get(parent);
6448         path->nodes[parent_level] = parent;
6449         path->slots[parent_level] = btrfs_header_nritems(parent);
6450
6451         btrfs_assert_tree_locked(node);
6452         level = btrfs_header_level(node);
6453         path->nodes[level] = node;
6454         path->slots[level] = 0;
6455         path->locks[level] = 1;
6456
6457         wc->refs[parent_level] = 1;
6458         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6459         wc->level = level;
6460         wc->shared_level = -1;
6461         wc->stage = DROP_REFERENCE;
6462         wc->update_ref = 0;
6463         wc->keep_locks = 1;
6464         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6465
6466         while (1) {
6467                 wret = walk_down_tree(trans, root, path, wc);
6468                 if (wret < 0) {
6469                         ret = wret;
6470                         break;
6471                 }
6472
6473                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6474                 if (wret < 0)
6475                         ret = wret;
6476                 if (wret != 0)
6477                         break;
6478         }
6479
6480         kfree(wc);
6481         btrfs_free_path(path);
6482         return ret;
6483 }
6484
6485 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6486 {
6487         u64 num_devices;
6488         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6489                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6490
6491         /*
6492          * we add in the count of missing devices because we want
6493          * to make sure that any RAID levels on a degraded FS
6494          * continue to be honored.
6495          */
6496         num_devices = root->fs_info->fs_devices->rw_devices +
6497                 root->fs_info->fs_devices->missing_devices;
6498
6499         if (num_devices == 1) {
6500                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6501                 stripped = flags & ~stripped;
6502
6503                 /* turn raid0 into single device chunks */
6504                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6505                         return stripped;
6506
6507                 /* turn mirroring into duplication */
6508                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6509                              BTRFS_BLOCK_GROUP_RAID10))
6510                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6511                 return flags;
6512         } else {
6513                 /* they already had raid on here, just return */
6514                 if (flags & stripped)
6515                         return flags;
6516
6517                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6518                 stripped = flags & ~stripped;
6519
6520                 /* switch duplicated blocks with raid1 */
6521                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6522                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6523
6524                 /* turn single device chunks into raid0 */
6525                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6526         }
6527         return flags;
6528 }
6529
6530 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
6531 {
6532         struct btrfs_space_info *sinfo = cache->space_info;
6533         u64 num_bytes;
6534         int ret = -ENOSPC;
6535
6536         if (cache->ro)
6537                 return 0;
6538
6539         spin_lock(&sinfo->lock);
6540         spin_lock(&cache->lock);
6541         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6542                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6543
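             /*
              * num_bytes is the group's free space; only flip the group to
              * RO if the rest of the space_info can absorb losing it.
              */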
6544         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
6545             sinfo->bytes_may_use + sinfo->bytes_readonly +
6546             cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
6547                 sinfo->bytes_readonly += num_bytes;
6548                 sinfo->bytes_reserved += cache->reserved_pinned;
6549                 cache->reserved_pinned = 0;
6550                 cache->ro = 1;
6551                 ret = 0;
6552         }
6553
6554         spin_unlock(&cache->lock);
6555         spin_unlock(&sinfo->lock);
6556         return ret;
6557 }
6558
6559 int btrfs_set_block_group_ro(struct btrfs_root *root,
6560                              struct btrfs_block_group_cache *cache)
6562 {
6563         struct btrfs_trans_handle *trans;
6564         u64 alloc_flags;
6565         int ret;
6566
6567         BUG_ON(cache->ro);
6568
6569         trans = btrfs_join_transaction(root);
6570         BUG_ON(IS_ERR(trans));
6571
6572         alloc_flags = update_block_group_flags(root, cache->flags);
6573         if (alloc_flags != cache->flags)
6574                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6575                                CHUNK_ALLOC_FORCE);
6576
6577         ret = set_block_group_ro(cache);
6578         if (!ret)
6579                 goto out;
6580         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
6581         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6582                              CHUNK_ALLOC_FORCE);
6583         if (ret < 0)
6584                 goto out;
6585         ret = set_block_group_ro(cache);
6586 out:
6587         btrfs_end_transaction(trans, root);
6588         return ret;
6589 }
6590
6591 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
6592                             struct btrfs_root *root, u64 type)
6593 {
6594         u64 alloc_flags = get_alloc_profile(root, type);
6595         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6596                               CHUNK_ALLOC_FORCE);
6597 }
6598
6599 /*
6600  * helper to account the unused space of all the readonly block groups in the
6601  * list. takes mirrors into account.
6602  */
6603 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
6604 {
6605         struct btrfs_block_group_cache *block_group;
6606         u64 free_bytes = 0;
6607         int factor;
6608
6609         list_for_each_entry(block_group, groups_list, list) {
6610                 spin_lock(&block_group->lock);
6611
6612                 if (!block_group->ro) {
6613                         spin_unlock(&block_group->lock);
6614                         continue;
6615                 }
6616
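                     /* mirrored profiles consume two bytes of raw space per byte */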
6617                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
6618                                           BTRFS_BLOCK_GROUP_RAID10 |
6619                                           BTRFS_BLOCK_GROUP_DUP))
6620                         factor = 2;
6621                 else
6622                         factor = 1;
6623
6624                 free_bytes += (block_group->key.offset -
6625                                btrfs_block_group_used(&block_group->item)) *
6626                                factor;
6627
6628                 spin_unlock(&block_group->lock);
6629         }
6630
6631         return free_bytes;
6632 }
6633
6634 /*
6635  * helper to account the unused space of all the readonly block groups in the
6636  * space_info. takes mirrors into account.
6637  */
6638 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
6639 {
6640         int i;
6641         u64 free_bytes = 0;
6642
6643         spin_lock(&sinfo->lock);
6644
6645         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
6646                 if (!list_empty(&sinfo->block_groups[i]))
6647                         free_bytes += __btrfs_get_ro_block_group_free_space(
6648                                                 &sinfo->block_groups[i]);
6649
6650         spin_unlock(&sinfo->lock);
6651
6652         return free_bytes;
6653 }
6654
6655 int btrfs_set_block_group_rw(struct btrfs_root *root,
6656                               struct btrfs_block_group_cache *cache)
6657 {
6658         struct btrfs_space_info *sinfo = cache->space_info;
6659         u64 num_bytes;
6660
6661         BUG_ON(!cache->ro);
6662
6663         spin_lock(&sinfo->lock);
6664         spin_lock(&cache->lock);
6665         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6666                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6667         sinfo->bytes_readonly -= num_bytes;
6668         cache->ro = 0;
6669         spin_unlock(&cache->lock);
6670         spin_unlock(&sinfo->lock);
6671         return 0;
6672 }
6673
6674 /*
6675  * checks to see if it's even possible to relocate this block group.
6676  *
6677  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
6678  * ok to go ahead and try.
6679  */
6680 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6681 {
6682         struct btrfs_block_group_cache *block_group;
6683         struct btrfs_space_info *space_info;
6684         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6685         struct btrfs_device *device;
6686         int full = 0;
6687         int ret = 0;
6688
6689         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6690
6691         /* odd, couldn't find the block group, leave it alone */
6692         if (!block_group)
6693                 return -1;
6694
6695         /* no bytes used, we're good */
6696         if (!btrfs_block_group_used(&block_group->item))
6697                 goto out;
6698
6699         space_info = block_group->space_info;
6700         spin_lock(&space_info->lock);
6701
6702         full = space_info->full;
6703
6704         /*
6705          * if this is the last block group we have in this space, we can't
6706          * relocate it unless we're able to allocate a new chunk below.
6707          *
6708          * Otherwise, we need to make sure we have room in the space to handle
6709          * all of the extents from this block group.  If we can, we're good.
6710          */
6711         if ((space_info->total_bytes != block_group->key.offset) &&
6712            (space_info->bytes_used + space_info->bytes_reserved +
6713             space_info->bytes_pinned + space_info->bytes_readonly +
6714             btrfs_block_group_used(&block_group->item) <
6715             space_info->total_bytes)) {
6716                 spin_unlock(&space_info->lock);
6717                 goto out;
6718         }
6719         spin_unlock(&space_info->lock);
6720
6721         /*
6722          * ok we don't have enough space, but maybe we have free space on our
6723          * devices to allocate new chunks for relocation, so loop through our
6724          * alloc devices and guess if we have enough space.  However, if we
6725          * were marked as full, then we know there aren't enough chunks, and we
6726          * can just return.
6727          */
6728         ret = -1;
6729         if (full)
6730                 goto out;
6731
6732         mutex_lock(&root->fs_info->chunk_mutex);
6733         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6734                 u64 min_free = btrfs_block_group_used(&block_group->item);
6735                 u64 dev_offset;
6736
6737                 /*
6738                  * check to make sure we can actually find a chunk with enough
6739                  * space to fit our block group in.
6740                  */
6741                 if (device->total_bytes > device->bytes_used + min_free) {
6742                         ret = find_free_dev_extent(NULL, device, min_free,
6743                                                    &dev_offset, NULL);
6744                         if (!ret)
6745                                 break;
6746                         ret = -1;
6747                 }
6748         }
6749         mutex_unlock(&root->fs_info->chunk_mutex);
6750 out:
6751         btrfs_put_block_group(block_group);
6752         return ret;
6753 }
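
/*
 * Illustrative sketch, not part of the original file: a relocation
 * caller (btrfs_relocate_chunk() in volumes.c, for instance) is expected
 * to gate the actual move on the check above, roughly like this.  The
 * helper name is hypothetical.
 */
static int __maybe_unused try_relocate_group(struct btrfs_root *extent_root,
                                             u64 group_start)
{
        /* bail out early if relocation has no chance of succeeding */
        if (btrfs_can_relocate(extent_root, group_start))
                return -ENOSPC;

        /* move every extent out of the group, then it can be removed */
        return btrfs_relocate_block_group(extent_root, group_start);
}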
6754
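/*
 * Position @path at the first BLOCK_GROUP_ITEM whose objectid is
 * >= key->objectid.  Returns 0 with the path pointing at the item,
 * a positive value if no such item exists, or a negative errno if
 * the tree search fails.
 */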
6755 static int find_first_block_group(struct btrfs_root *root,
6756                 struct btrfs_path *path, struct btrfs_key *key)
6757 {
6758         int ret = 0;
6759         struct btrfs_key found_key;
6760         struct extent_buffer *leaf;
6761         int slot;
6762
6763         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6764         if (ret < 0)
6765                 goto out;
6766
6767         while (1) {
6768                 slot = path->slots[0];
6769                 leaf = path->nodes[0];
6770                 if (slot >= btrfs_header_nritems(leaf)) {
6771                         ret = btrfs_next_leaf(root, path);
6772                         if (ret == 0)
6773                                 continue;
6774                         if (ret < 0)
6775                                 goto out;
6776                         break;
6777                 }
6778                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6779
6780                 if (found_key.objectid >= key->objectid &&
6781                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6782                         ret = 0;
6783                         goto out;
6784                 }
6785                 path->slots[0]++;
6786         }
6787 out:
6788         return ret;
6789 }
6790
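/*
 * Drop the reference each block group may hold on its free space cache
 * inode (iref).  Walks all block groups starting from 'last', and
 * restarts the scan from the beginning once a pass finds no more
 * referenced inodes, so nothing is left pinned at unmount.
 */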
6791 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
6792 {
6793         struct btrfs_block_group_cache *block_group;
6794         u64 last = 0;
6795
6796         while (1) {
6797                 struct inode *inode;
6798
6799                 block_group = btrfs_lookup_first_block_group(info, last);
6800                 while (block_group) {
6801                         spin_lock(&block_group->lock);
6802                         if (block_group->iref)
6803                                 break;
6804                         spin_unlock(&block_group->lock);
6805                         block_group = next_block_group(info->tree_root,
6806                                                        block_group);
6807                 }
6808                 if (!block_group) {
6809                         if (last == 0)
6810                                 break;
6811                         last = 0;
6812                         continue;
6813                 }
6814
6815                 inode = block_group->inode;
6816                 block_group->iref = 0;
6817                 block_group->inode = NULL;
6818                 spin_unlock(&block_group->lock);
6819                 iput(inode);
6820                 last = block_group->key.objectid + block_group->key.offset;
6821                 btrfs_put_block_group(block_group);
6822         }
6823 }
6824
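/*
 * Unmount-time teardown: drop any in-flight caching controls, pull
 * every block group out of the cache rbtree and free it, then free the
 * space_info structures themselves.
 */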
6825 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6826 {
6827         struct btrfs_block_group_cache *block_group;
6828         struct btrfs_space_info *space_info;
6829         struct btrfs_caching_control *caching_ctl;
6830         struct rb_node *n;
6831
6832         down_write(&info->extent_commit_sem);
6833         while (!list_empty(&info->caching_block_groups)) {
6834                 caching_ctl = list_entry(info->caching_block_groups.next,
6835                                          struct btrfs_caching_control, list);
6836                 list_del(&caching_ctl->list);
6837                 put_caching_control(caching_ctl);
6838         }
6839         up_write(&info->extent_commit_sem);
6840
6841         spin_lock(&info->block_group_cache_lock);
6842         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6843                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6844                                        cache_node);
6845                 rb_erase(&block_group->cache_node,
6846                          &info->block_group_cache_tree);
6847                 spin_unlock(&info->block_group_cache_lock);
6848
6849                 down_write(&block_group->space_info->groups_sem);
6850                 list_del(&block_group->list);
6851                 up_write(&block_group->space_info->groups_sem);
6852
6853                 if (block_group->cached == BTRFS_CACHE_STARTED)
6854                         wait_block_group_cache_done(block_group);
6855
6856                 /*
6857                  * We haven't cached this block group, which means we could
6858                  * possibly have excluded extents on this block group.
6859                  */
6860                 if (block_group->cached == BTRFS_CACHE_NO)
6861                         free_excluded_extents(info->extent_root, block_group);
6862
6863                 btrfs_remove_free_space_cache(block_group);
6864                 btrfs_put_block_group(block_group);
6865
6866                 spin_lock(&info->block_group_cache_lock);
6867         }
6868         spin_unlock(&info->block_group_cache_lock);
6869
6870         /* now that all the block groups are freed, go through and
6871          * free all the space_info structs.  This is only called during
6872          * the final stages of unmount, and so we know nobody is
6873          * using them.  We call synchronize_rcu() once before we start,
6874          * just to be on the safe side.
6875          */
6876         synchronize_rcu();
6877
6878         release_global_block_rsv(info);
6879
6880         while (!list_empty(&info->space_info)) {
6881                 space_info = list_entry(info->space_info.next,
6882                                         struct btrfs_space_info,
6883                                         list);
6884                 if (space_info->bytes_pinned > 0 ||
6885                     space_info->bytes_reserved > 0) {
6886                         WARN_ON(1);
6887                         dump_space_info(space_info, 0, 0);
6888                 }
6889                 list_del(&space_info->list);
6890                 kfree(space_info);
6891         }
6892         return 0;
6893 }
6894
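/*
 * Add the block group to the per-profile list inside its space_info;
 * get_block_group_index() maps the group's RAID flags to the list index.
 */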
6895 static void __link_block_group(struct btrfs_space_info *space_info,
6896                                struct btrfs_block_group_cache *cache)
6897 {
6898         int index = get_block_group_index(cache);
6899
6900         down_write(&space_info->groups_sem);
6901         list_add_tail(&cache->list, &space_info->block_groups[index]);
6902         up_write(&space_info->groups_sem);
6903 }
6904
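/*
 * Called from open_ctree() at mount time: loads every block group item
 * from the extent tree, builds the in-memory block group cache and
 * space_info accounting, and read-only marks any unmirrored groups when
 * mirrored profiles are in use.
 */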
6905 int btrfs_read_block_groups(struct btrfs_root *root)
6906 {
6907         struct btrfs_path *path;
6908         int ret;
6909         struct btrfs_block_group_cache *cache;
6910         struct btrfs_fs_info *info = root->fs_info;
6911         struct btrfs_space_info *space_info;
6912         struct btrfs_key key;
6913         struct btrfs_key found_key;
6914         struct extent_buffer *leaf;
6915         int need_clear = 0;
6916         u64 cache_gen;
6917
6918         root = info->extent_root;
6919         key.objectid = 0;
6920         key.offset = 0;
6921         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6922         path = btrfs_alloc_path();
6923         if (!path)
6924                 return -ENOMEM;
6925         path->reada = 1;
6926
6927         cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
6928         if (cache_gen != 0 &&
6929             btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
6930                 need_clear = 1;
6931         if (btrfs_test_opt(root, CLEAR_CACHE))
6932                 need_clear = 1;
6933         if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
6934                 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
6935
6936         while (1) {
6937                 ret = find_first_block_group(root, path, &key);
6938                 if (ret > 0)
6939                         break;
6940                 if (ret != 0)
6941                         goto error;
6942                 leaf = path->nodes[0];
6943                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6944                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6945                 if (!cache) {
6946                         ret = -ENOMEM;
6947                         goto error;
6948                 }
6949                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
6950                                                 GFP_NOFS);
6951                 if (!cache->free_space_ctl) {
6952                         kfree(cache);
6953                         ret = -ENOMEM;
6954                         goto error;
6955                 }
6956
6957                 atomic_set(&cache->count, 1);
6958                 spin_lock_init(&cache->lock);
6959                 cache->fs_info = info;
6960                 INIT_LIST_HEAD(&cache->list);
6961                 INIT_LIST_HEAD(&cache->cluster_list);
6962
6963                 if (need_clear)
6964                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6965
6966                 read_extent_buffer(leaf, &cache->item,
6967                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
6968                                    sizeof(cache->item));
6969                 memcpy(&cache->key, &found_key, sizeof(found_key));
6970
6971                 key.objectid = found_key.objectid + found_key.offset;
6972                 btrfs_release_path(path);
6973                 cache->flags = btrfs_block_group_flags(&cache->item);
6974                 cache->sectorsize = root->sectorsize;
6975
6976                 btrfs_init_free_space_ctl(cache);
6977
6978                 /*
6979                  * We need to exclude the super stripes now so that the space
6980                  * info has super bytes accounted for, otherwise we'll think
6981                  * we have more space than we actually do.
6982                  */
6983                 exclude_super_stripes(root, cache);
6984
6985                 /*
6986                  * check for two cases: either we are full, and therefore
6987                  * don't need to bother with the caching work since we won't
6988                  * find any space, or we are empty, and we can just add all
6989                  * the space in and be done with it.  This saves us a lot of
6990                  * time, particularly in the full case.
6991                  */
6992                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
6993                         cache->last_byte_to_unpin = (u64)-1;
6994                         cache->cached = BTRFS_CACHE_FINISHED;
6995                         free_excluded_extents(root, cache);
6996                 } else if (btrfs_block_group_used(&cache->item) == 0) {
6997                         cache->last_byte_to_unpin = (u64)-1;
6998                         cache->cached = BTRFS_CACHE_FINISHED;
6999                         add_new_free_space(cache, root->fs_info,
7000                                            found_key.objectid,
7001                                            found_key.objectid +
7002                                            found_key.offset);
7003                         free_excluded_extents(root, cache);
7004                 }
7005
7006                 ret = update_space_info(info, cache->flags, found_key.offset,
7007                                         btrfs_block_group_used(&cache->item),
7008                                         &space_info);
7009                 BUG_ON(ret);
7010                 cache->space_info = space_info;
7011                 spin_lock(&cache->space_info->lock);
7012                 cache->space_info->bytes_readonly += cache->bytes_super;
7013                 spin_unlock(&cache->space_info->lock);
7014
7015                 __link_block_group(space_info, cache);
7016
7017                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7018                 BUG_ON(ret);
7019
7020                 set_avail_alloc_bits(root->fs_info, cache->flags);
7021                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7022                         set_block_group_ro(cache);
7023         }
7024
7025         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7026                 if (!(get_alloc_profile(root, space_info->flags) &
7027                       (BTRFS_BLOCK_GROUP_RAID10 |
7028                        BTRFS_BLOCK_GROUP_RAID1 |
7029                        BTRFS_BLOCK_GROUP_DUP)))
7030                         continue;
7031                 /*
7032                  * avoid allocating from the un-mirrored block groups
7033                  * (RAID0, index 3; single, index 4) if mirrored ones exist
7034                  */
7035                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7036                         set_block_group_ro(cache);
7037                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7038                         set_block_group_ro(cache);
7039         }
7040
7041         init_global_block_rsv(info);
7042         ret = 0;
7043 error:
7044         btrfs_free_path(path);
7045         return ret;
7046 }
7047
7048 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7049                            struct btrfs_root *root, u64 bytes_used,
7050                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7051                            u64 size)
7052 {
7053         int ret;
7054         struct btrfs_root *extent_root;
7055         struct btrfs_block_group_cache *cache;
7056
7057         extent_root = root->fs_info->extent_root;
7058
7059         root->fs_info->last_trans_log_full_commit = trans->transid;
7060
7061         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7062         if (!cache)
7063                 return -ENOMEM;
7064         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7065                                         GFP_NOFS);
7066         if (!cache->free_space_ctl) {
7067                 kfree(cache);
7068                 return -ENOMEM;
7069         }
7070
7071         cache->key.objectid = chunk_offset;
7072         cache->key.offset = size;
7073         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7074         cache->sectorsize = root->sectorsize;
7075         cache->fs_info = root->fs_info;
7076
7077         atomic_set(&cache->count, 1);
7078         spin_lock_init(&cache->lock);
7079         INIT_LIST_HEAD(&cache->list);
7080         INIT_LIST_HEAD(&cache->cluster_list);
7081
7082         btrfs_init_free_space_ctl(cache);
7083
7084         btrfs_set_block_group_used(&cache->item, bytes_used);
7085         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7086         cache->flags = type;
7087         btrfs_set_block_group_flags(&cache->item, type);
7088
7089         cache->last_byte_to_unpin = (u64)-1;
7090         cache->cached = BTRFS_CACHE_FINISHED;
7091         exclude_super_stripes(root, cache);
7092
7093         add_new_free_space(cache, root->fs_info, chunk_offset,
7094                            chunk_offset + size);
7095
7096         free_excluded_extents(root, cache);
7097
7098         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7099                                 &cache->space_info);
7100         BUG_ON(ret);
7101
7102         spin_lock(&cache->space_info->lock);
7103         cache->space_info->bytes_readonly += cache->bytes_super;
7104         spin_unlock(&cache->space_info->lock);
7105
7106         __link_block_group(cache->space_info, cache);
7107
7108         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7109         BUG_ON(ret);
7110
7111         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7112                                 sizeof(cache->item));
7113         BUG_ON(ret);
7114
7115         set_avail_alloc_bits(extent_root->fs_info, type);
7116
7117         return 0;
7118 }
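
/*
 * Illustrative sketch, not part of the original file: the chunk
 * allocator in volumes.c is the expected caller, recording a freshly
 * allocated chunk as a block group roughly like this.  The helper name
 * is hypothetical; a brand new chunk starts with zero bytes used.
 */
static int __maybe_unused record_new_chunk(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *extent_root,
                                           u64 type, u64 start, u64 num_bytes)
{
        return btrfs_make_block_group(trans, extent_root, 0, type,
                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                      start, num_bytes);
}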
7119
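/*
 * Remove a read-only block group: delete its free space cache inode and
 * item, unlink it from the cache rbtree and its space_info, adjust the
 * accounting, and finally delete the block group item itself.
 */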
7120 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7121                              struct btrfs_root *root, u64 group_start)
7122 {
7123         struct btrfs_path *path;
7124         struct btrfs_block_group_cache *block_group;
7125         struct btrfs_free_cluster *cluster;
7126         struct btrfs_root *tree_root = root->fs_info->tree_root;
7127         struct btrfs_key key;
7128         struct inode *inode;
7129         int ret;
7130         int factor;
7131
7132         root = root->fs_info->extent_root;
7133
7134         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7135         BUG_ON(!block_group);
7136         BUG_ON(!block_group->ro);
7137
7138         /*
7139          * Free the reserved super bytes from this block group before
7140          * removing it.
7141          */
7142         free_excluded_extents(root, block_group);
7143
7144         memcpy(&key, &block_group->key, sizeof(key));
7145         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7146                                   BTRFS_BLOCK_GROUP_RAID1 |
7147                                   BTRFS_BLOCK_GROUP_RAID10))
7148                 factor = 2;
7149         else
7150                 factor = 1;
7151
7152         /* make sure this block group isn't part of an allocation cluster */
7153         cluster = &root->fs_info->data_alloc_cluster;
7154         spin_lock(&cluster->refill_lock);
7155         btrfs_return_cluster_to_free_space(block_group, cluster);
7156         spin_unlock(&cluster->refill_lock);
7157
7158         /*
7159          * make sure this block group isn't part of a metadata
7160          * allocation cluster
7161          */
7162         cluster = &root->fs_info->meta_alloc_cluster;
7163         spin_lock(&cluster->refill_lock);
7164         btrfs_return_cluster_to_free_space(block_group, cluster);
7165         spin_unlock(&cluster->refill_lock);
7166
7167         path = btrfs_alloc_path();
7168         if (!path) {
7169                 ret = -ENOMEM;
7170                 goto out;
7171         }
7172
7173         inode = lookup_free_space_inode(root, block_group, path);
7174         if (!IS_ERR(inode)) {
7175                 btrfs_orphan_add(trans, inode);
7176                 clear_nlink(inode);
7177                 /* One for the block group's ref */
7178                 spin_lock(&block_group->lock);
7179                 if (block_group->iref) {
7180                         block_group->iref = 0;
7181                         block_group->inode = NULL;
7182                         spin_unlock(&block_group->lock);
7183                         iput(inode);
7184                 } else {
7185                         spin_unlock(&block_group->lock);
7186                 }
7187                 /* One for our lookup ref */
7188                 iput(inode);
7189         }
7190
7191         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7192         key.offset = block_group->key.objectid;
7193         key.type = 0;
7194
7195         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7196         if (ret < 0)
7197                 goto out;
7198         if (ret > 0)
7199                 btrfs_release_path(path);
7200         if (ret == 0) {
7201                 ret = btrfs_del_item(trans, tree_root, path);
7202                 if (ret)
7203                         goto out;
7204                 btrfs_release_path(path);
7205         }
7206
7207         spin_lock(&root->fs_info->block_group_cache_lock);
7208         rb_erase(&block_group->cache_node,
7209                  &root->fs_info->block_group_cache_tree);
7210         spin_unlock(&root->fs_info->block_group_cache_lock);
7211
7212         down_write(&block_group->space_info->groups_sem);
7213         /*
7214          * we must use list_del_init so people can check to see if they
7215          * are still on the list after taking the semaphore
7216          */
7217         list_del_init(&block_group->list);
7218         up_write(&block_group->space_info->groups_sem);
7219
7220         if (block_group->cached == BTRFS_CACHE_STARTED)
7221                 wait_block_group_cache_done(block_group);
7222
7223         btrfs_remove_free_space_cache(block_group);
7224
7225         spin_lock(&block_group->space_info->lock);
7226         block_group->space_info->total_bytes -= block_group->key.offset;
7227         block_group->space_info->bytes_readonly -= block_group->key.offset;
7228         block_group->space_info->disk_total -= block_group->key.offset * factor;
7229         spin_unlock(&block_group->space_info->lock);
7230
7231         memcpy(&key, &block_group->key, sizeof(key));
7232
7233         btrfs_clear_space_info_full(root->fs_info);
7234
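        /*
         * Drop two references: one for the lookup at the top of this
         * function, and one for the reference the block group cache
         * rbtree held until the rb_erase() above.
         */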
7235         btrfs_put_block_group(block_group);
7236         btrfs_put_block_group(block_group);
7237
7238         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7239         if (ret > 0)
7240                 ret = -EIO;
7241         if (ret < 0)
7242                 goto out;
7243
7244         ret = btrfs_del_item(trans, root, path);
7245 out:
7246         btrfs_free_path(path);
7247         return ret;
7248 }
7249
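/*
 * Pre-create the basic space_info structures (with zero sizes) for a
 * filesystem with no block groups yet: SYSTEM, plus either a single
 * mixed METADATA|DATA info when the MIXED_GROUPS incompat feature is
 * set, or separate METADATA and DATA infos otherwise.
 */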
7250 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7251 {
7252         struct btrfs_space_info *space_info;
7253         struct btrfs_super_block *disk_super;
7254         u64 features;
7255         u64 flags;
7256         int mixed = 0;
7257         int ret;
7258
7259         disk_super = &fs_info->super_copy;
7260         if (!btrfs_super_root(disk_super))
7261                 return 1;
7262
7263         features = btrfs_super_incompat_flags(disk_super);
7264         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7265                 mixed = 1;
7266
7267         flags = BTRFS_BLOCK_GROUP_SYSTEM;
7268         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7269         if (ret)
7270                 goto out;
7271
7272         if (mixed) {
7273                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7274                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7275         } else {
7276                 flags = BTRFS_BLOCK_GROUP_METADATA;
7277                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7278                 if (ret)
7279                         goto out;
7280
7281                 flags = BTRFS_BLOCK_GROUP_DATA;
7282                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7283         }
7284 out:
7285         return ret;
7286 }
7287
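/*
 * Thin wrappers used by the error-handling cleanup code in disk-io.c to
 * unpin and discard pinned extent ranges outside a normal transaction
 * commit; unpin_extent_range() itself is static to this file.
 */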
7288 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7289 {
7290         return unpin_extent_range(root, start, end);
7291 }
7292
7293 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7294                                u64 num_bytes, u64 *actual_bytes)
7295 {
7296         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7297 }
7298
7299 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7300 {
7301         struct btrfs_fs_info *fs_info = root->fs_info;
7302         struct btrfs_block_group_cache *cache = NULL;
7303         u64 group_trimmed;
7304         u64 start;
7305         u64 end;
7306         u64 trimmed = 0;
7307         int ret = 0;
7308
7309         cache = btrfs_lookup_block_group(fs_info, range->start);
7310
7311         while (cache) {
7312                 if (cache->key.objectid >= (range->start + range->len)) {
7313                         btrfs_put_block_group(cache);
7314                         break;
7315                 }
7316
7317                 start = max(range->start, cache->key.objectid);
7318                 end = min(range->start + range->len,
7319                                 cache->key.objectid + cache->key.offset);
7320
7321                 if (end - start >= range->minlen) {
7322                         if (!block_group_cache_done(cache)) {
7323                                 ret = cache_block_group(cache, NULL, root, 0);
7324                                 if (!ret)
7325                                         wait_block_group_cache_done(cache);
7326                         }
7327                         ret = btrfs_trim_block_group(cache,
7328                                                      &group_trimmed,
7329                                                      start,
7330                                                      end,
7331                                                      range->minlen);
7332
7333                         trimmed += group_trimmed;
7334                         if (ret) {
7335                                 btrfs_put_block_group(cache);
7336                                 break;
7337                         }
7338                 }
7339
7340                 cache = next_block_group(fs_info->tree_root, cache);
7341         }
7342
7343         range->len = trimmed;
7344         return ret;
7345 }
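
/*
 * Illustrative sketch, not part of the original file: the FITRIM ioctl
 * handler is the expected consumer, copying a struct fstrim_range in
 * from user space, trimming, and copying the resulting range.len back
 * out.  The helper name is hypothetical, error handling is minimal, and
 * copy_{from,to}_user() assume <linux/uaccess.h>.
 */
static long __maybe_unused example_fitrim(struct btrfs_root *root,
                                          void __user *arg)
{
        struct fstrim_range range;
        int ret;

        if (copy_from_user(&range, arg, sizeof(range)))
                return -EFAULT;

        ret = btrfs_trim_fs(root, &range);
        if (ret < 0)
                return ret;

        /* btrfs_trim_fs() stored the total trimmed bytes in range.len */
        if (copy_to_user(arg, &range, sizeof(range)))
                return -EFAULT;
        return 0;
}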