btrfs: fix lockdep warning with reclaim lock inversion
[cascardo/linux.git] fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
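
/*
 * Editorial sketch (not part of the original file): how a caller might
 * choose between the force levels above.  The helper name and its two
 * flags are hypothetical; real callers simply pass one of the enum
 * values straight to do_chunk_alloc().
 */
static inline int example_chunk_alloc_force(int must_have_space,
                                            int clustering)
{
        if (must_have_space)
                return CHUNK_ALLOC_FORCE;       /* always try to allocate */
        if (clustering)
                return CHUNK_ALLOC_LIMITED;     /* only if few chunks exist */
        return CHUNK_ALLOC_NO_FORCE;            /* only if really needed */
}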

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
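
/*
 * Editorial usage sketch (not part of the original file): block groups
 * are reference counted via the get/put pair above.  Lookup helpers such
 * as btrfs_lookup_block_group() return with a reference held, so every
 * successful lookup must be balanced by btrfs_put_block_group().  The
 * function below is a hypothetical illustration of that pattern.
 */
static inline void example_block_group_use(struct btrfs_fs_info *fs_info,
                                           u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = btrfs_lookup_block_group(fs_info, bytenr);
        if (!cache)
                return;
        /* ... inspect cache->key, cache->flags, etc. ... */
        btrfs_put_block_group(cache);   /* drop the lookup reference */
}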

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, as their free space won't be released until the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
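
/*
 * Editorial worked example (not part of the original file), with made-up
 * numbers: for start=0, end=100 and a single pinned extent covering
 * [40, 60] (inclusive), the loop above adds [0, 40) as free space and
 * advances start to 61; no further pinned extent is found, so the loop
 * breaks and the tail [61, 100) is added after it.  Total added:
 * 40 + 39 = 79 bytes.
 */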

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
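
/*
 * Editorial sketch (not part of the original file): consumers elsewhere
 * in this file wait for caching to make progress or finish using the
 * pattern below.  The function name here is hypothetical.
 */
static inline void example_wait_block_group_cached(
                                struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *caching_ctl;

        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
                return;         /* caching already finished or errored */
        wait_event(caching_ctl->wait, block_group_cache_done(cache));
        put_caching_control(caching_ctl);
}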

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what
 * the reference count and extent flags would be if all of the delayed
 * refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different.
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */
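
/*
 * Editorial worked example (not part of the original file), with made-up
 * numbers: a data extent at bytenr E referenced by inode 257 at file
 * offset 0 in subvolume 5 gets an implicit back ref keyed as
 *
 *     (E, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared tree block at
 * bytenr P gets a full back ref keyed as
 *
 *     (E, BTRFS_SHARED_DATA_REF_KEY, P)
 */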

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
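
/*
 * Editorial note: this hash is persisted on disk as the key offset of
 * BTRFS_EXTENT_DATA_REF_KEY items, so its exact form (including the
 * 31-bit, not 32-bit, shift of high_crc) must stay stable; changing it
 * would make back refs written by older kernels unfindable.
 */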

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
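
/*
 * Editorial note on the -EEXIST loop above: key.offset is a hash, so two
 * different (root, objectid, offset) triples can collide.  When an item
 * already exists at the hashed offset but describes a different
 * reference, the code retries at key.offset + 1, + 2, ... until it finds
 * either the matching ref or a free slot -- effectively open addressing
 * within the extent tree.  lookup_extent_data_ref() mirrors this by
 * scanning forward from the hashed offset.
 */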
1270
1271 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1272                                            struct btrfs_root *root,
1273                                            struct btrfs_path *path,
1274                                            int refs_to_drop)
1275 {
1276         struct btrfs_key key;
1277         struct btrfs_extent_data_ref *ref1 = NULL;
1278         struct btrfs_shared_data_ref *ref2 = NULL;
1279         struct extent_buffer *leaf;
1280         u32 num_refs = 0;
1281         int ret = 0;
1282
1283         leaf = path->nodes[0];
1284         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1285
1286         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1287                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_data_ref);
1289                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1290         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1291                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1292                                       struct btrfs_shared_data_ref);
1293                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1295         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1296                 struct btrfs_extent_ref_v0 *ref0;
1297                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1298                                       struct btrfs_extent_ref_v0);
1299                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1300 #endif
1301         } else {
1302                 BUG();
1303         }
1304
1305         BUG_ON(num_refs < refs_to_drop);
1306         num_refs -= refs_to_drop;
1307
1308         if (num_refs == 0) {
1309                 ret = btrfs_del_item(trans, root, path);
1310         } else {
1311                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1312                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1313                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1314                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1315 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1316                 else {
1317                         struct btrfs_extent_ref_v0 *ref0;
1318                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1319                                         struct btrfs_extent_ref_v0);
1320                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1321                 }
1322 #endif
1323                 btrfs_mark_buffer_dirty(leaf);
1324         }
1325         return ret;
1326 }
1327
1328 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1329                                           struct btrfs_path *path,
1330                                           struct btrfs_extent_inline_ref *iref)
1331 {
1332         struct btrfs_key key;
1333         struct extent_buffer *leaf;
1334         struct btrfs_extent_data_ref *ref1;
1335         struct btrfs_shared_data_ref *ref2;
1336         u32 num_refs = 0;
1337
1338         leaf = path->nodes[0];
1339         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1340         if (iref) {
1341                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1342                     BTRFS_EXTENT_DATA_REF_KEY) {
1343                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1344                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345                 } else {
1346                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1347                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348                 }
1349         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1350                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_data_ref);
1352                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1353         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1354                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1355                                       struct btrfs_shared_data_ref);
1356                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1358         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1359                 struct btrfs_extent_ref_v0 *ref0;
1360                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1361                                       struct btrfs_extent_ref_v0);
1362                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1363 #endif
1364         } else {
1365                 WARN_ON(1);
1366         }
1367         return num_refs;
1368 }
1369
1370 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1371                                           struct btrfs_root *root,
1372                                           struct btrfs_path *path,
1373                                           u64 bytenr, u64 parent,
1374                                           u64 root_objectid)
1375 {
1376         struct btrfs_key key;
1377         int ret;
1378
1379         key.objectid = bytenr;
1380         if (parent) {
1381                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1382                 key.offset = parent;
1383         } else {
1384                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1385                 key.offset = root_objectid;
1386         }
1387
1388         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1389         if (ret > 0)
1390                 ret = -ENOENT;
1391 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1392         if (ret == -ENOENT && parent) {
1393                 btrfs_release_path(path);
1394                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1395                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1396                 if (ret > 0)
1397                         ret = -ENOENT;
1398         }
1399 #endif
1400         return ret;
1401 }
1402
1403 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1404                                           struct btrfs_root *root,
1405                                           struct btrfs_path *path,
1406                                           u64 bytenr, u64 parent,
1407                                           u64 root_objectid)
1408 {
1409         struct btrfs_key key;
1410         int ret;
1411
1412         key.objectid = bytenr;
1413         if (parent) {
1414                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1415                 key.offset = parent;
1416         } else {
1417                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1418                 key.offset = root_objectid;
1419         }
1420
1421         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1422         btrfs_release_path(path);
1423         return ret;
1424 }
1425
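/*
 * Quick reference for extent_ref_type() below.  Owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks, everything else is file
 * data, and a non-zero parent means the ref is shared via the owning
 * block rather than tied to a root:
 *
 *	owner < FIRST_FREE, parent set	-> BTRFS_SHARED_BLOCK_REF_KEY
 *	owner < FIRST_FREE, no parent	-> BTRFS_TREE_BLOCK_REF_KEY
 *	owner >= FIRST_FREE, parent set	-> BTRFS_SHARED_DATA_REF_KEY
 *	owner >= FIRST_FREE, no parent	-> BTRFS_EXTENT_DATA_REF_KEY
 */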
1426 static inline int extent_ref_type(u64 parent, u64 owner)
1427 {
1428         int type;
1429         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1430                 if (parent > 0)
1431                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1432                 else
1433                         type = BTRFS_TREE_BLOCK_REF_KEY;
1434         } else {
1435                 if (parent > 0)
1436                         type = BTRFS_SHARED_DATA_REF_KEY;
1437                 else
1438                         type = BTRFS_EXTENT_DATA_REF_KEY;
1439         }
1440         return type;
1441 }
1442
1443 static int find_next_key(struct btrfs_path *path, int level,
1444                          struct btrfs_key *key)
1446 {
1447         for (; level < BTRFS_MAX_LEVEL; level++) {
1448                 if (!path->nodes[level])
1449                         break;
1450                 if (path->slots[level] + 1 >=
1451                     btrfs_header_nritems(path->nodes[level]))
1452                         continue;
1453                 if (level == 0)
1454                         btrfs_item_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 else
1457                         btrfs_node_key_to_cpu(path->nodes[level], key,
1458                                               path->slots[level] + 1);
1459                 return 0;
1460         }
1461         return 1;
1462 }
1463
1464 /*
1465  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1466  * to the address of the inline back ref, and 0 is returned.
1467  *
1468  * If the back ref isn't found, *ref_ret is set to the address where it
1469  * should be inserted, and -ENOENT is returned.
1470  *
1471  * If insert is true and there are too many inline back refs, the path
1472  * points to the extent item, and -EAGAIN is returned.
1473  *
1474  * NOTE: inline back refs are ordered in the same way that back ref
1475  *       items in the tree are ordered.
1476  */
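/*
 * Sketch of how a caller consumes those three return values (compare
 * insert_inline_extent_backref() and __btrfs_inc_extent_ref() below):
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *	ret == 0:	update the existing inline ref at *ref_ret
 *	ret == -ENOENT:	insert a new inline ref at *ref_ret
 *	ret == -EAGAIN:	no room inline (or a keyed item already exists),
 *			fall back to a separate back ref item
 */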
1477 static noinline_for_stack
1478 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1479                                  struct btrfs_root *root,
1480                                  struct btrfs_path *path,
1481                                  struct btrfs_extent_inline_ref **ref_ret,
1482                                  u64 bytenr, u64 num_bytes,
1483                                  u64 parent, u64 root_objectid,
1484                                  u64 owner, u64 offset, int insert)
1485 {
1486         struct btrfs_key key;
1487         struct extent_buffer *leaf;
1488         struct btrfs_extent_item *ei;
1489         struct btrfs_extent_inline_ref *iref;
1490         u64 flags;
1491         u64 item_size;
1492         unsigned long ptr;
1493         unsigned long end;
1494         int extra_size;
1495         int type;
1496         int want;
1497         int ret;
1498         int err = 0;
1499         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1500                                                  SKINNY_METADATA);
1501
1502         key.objectid = bytenr;
1503         key.type = BTRFS_EXTENT_ITEM_KEY;
1504         key.offset = num_bytes;
1505
1506         want = extent_ref_type(parent, owner);
1507         if (insert) {
1508                 extra_size = btrfs_extent_inline_ref_size(want);
1509                 path->keep_locks = 1;
1510         } else
1511                 extra_size = -1;
1512
1513         /*
1514          * Owner is our parent level, so we can just add one to get the level
1515          * for the block we are interested in.
1516          */
1517         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1518                 key.type = BTRFS_METADATA_ITEM_KEY;
1519                 key.offset = owner;
1520         }
1521
1522 again:
1523         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1524         if (ret < 0) {
1525                 err = ret;
1526                 goto out;
1527         }
1528
1529         /*
1530          * We may be a newly converted file system which still has the old fat
1531          * extent entries for metadata, so try and see if we have one of those.
1532          */
1533         if (ret > 0 && skinny_metadata) {
1534                 skinny_metadata = false;
1535                 if (path->slots[0]) {
1536                         path->slots[0]--;
1537                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1538                                               path->slots[0]);
1539                         if (key.objectid == bytenr &&
1540                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1541                             key.offset == num_bytes)
1542                                 ret = 0;
1543                 }
1544                 if (ret) {
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add a new inline back ref, we have to make sure
1646                  * that there is no corresponding back ref item.
1647                  * For simplicity, we just do not add a new inline back
1648                  * ref if there is any kind of item for this block.
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
1666 /*
1667  * helper to add new inline back ref
1668  */
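/*
 * The extent item layouts this operates on look roughly like:
 *
 *	data:	    [ btrfs_extent_item | inline ref | inline ref | ... ]
 *	tree block: [ btrfs_extent_item | btrfs_tree_block_info | refs... ]
 *	  (the tree_block_info is absent with skinny metadata)
 *
 * The item is first grown by btrfs_extend_item(), then everything from
 * the insertion offset to the old end is shifted up by the new ref's
 * size with memmove_extent_buffer() so the inline refs stay sorted.
 */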
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
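/*
 * refs_to_mod may be negative.  For example, refs_to_mod == -1 against a
 * ref that only counts 1 (tree block refs always do) takes the count to
 * zero, and the inline ref is then cut out of the item entirely via
 * memmove_extent_buffer() plus btrfs_truncate_item().
 */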
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
1810                 size = btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
1889
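/*
 * blkdev_issue_discard() works in 512 byte sectors, hence the >> 9
 * below: e.g. a 1MiB discard at byte offset 1MiB becomes sector 2048,
 * count 2048.
 */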
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
1895
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
1901         struct btrfs_bio *bbio = NULL;
1902
1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1910                 int i;
1911
1913                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
1923                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1924
1925                         /*
1926                          * In case we get back EOPNOTSUPP for some reason,
1927                          * just ignore the return value so we don't screw up
1928                          * people calling discard_extent.
1929                          */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
1936                 *actual_bytes = discarded_bytes;
1937
1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
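/*
 * Note that this only queues a delayed ref; the extent tree is not
 * touched until the delayed refs are run.  Owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks (the owner is then the
 * block's level) and become delayed tree refs, everything else becomes
 * a delayed data ref carrying owner/offset.
 */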
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
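/*
 * Flow of the function below: first try to stuff the new back ref
 * inline into the extent item.  On -EAGAIN (no room inline, or some
 * other item already exists for this block) the ref count on the extent
 * item the path points at is still bumped, and a separate keyed back
 * ref item is inserted instead.
 */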
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982
1983         path = btrfs_alloc_path();
1984         if (!path)
1985                 return -ENOMEM;
1986
1987         path->reada = 1;
1988         path->leave_spinning = 1;
1989         /* this will set up the path even if it fails to insert the back ref */
1990         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1991                                            path, bytenr, num_bytes, parent,
1992                                            root_objectid, owner, offset,
1993                                            refs_to_add, extent_op);
1994         if (ret != -EAGAIN)
1995                 goto out;
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return ret;
2019 }
2020
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         trace_run_delayed_data_ref(node, ref, node->action);
2040
2041         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2042                 parent = ref->parent;
2043         else
2044                 ref_root = ref->root;
2045
2046         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2047                 if (extent_op)
2048                         flags |= extent_op->flags_to_set;
2049                 ret = alloc_reserved_file_extent(trans, root,
2050                                                  parent, ref_root, flags,
2051                                                  ref->objectid, ref->offset,
2052                                                  &ins, node->ref_mod);
2053         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2054                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2055                                              node->num_bytes, parent,
2056                                              ref_root, ref->objectid,
2057                                              ref->offset, node->ref_mod,
2058                                              extent_op);
2059         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2060                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2061                                           node->num_bytes, parent,
2062                                           ref_root, ref->objectid,
2063                                           ref->offset, node->ref_mod,
2064                                           extent_op);
2065         } else {
2066                 BUG();
2067         }
2068         return ret;
2069 }
2070
2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2072                                     struct extent_buffer *leaf,
2073                                     struct btrfs_extent_item *ei)
2074 {
2075         u64 flags = btrfs_extent_flags(leaf, ei);
2076         if (extent_op->update_flags) {
2077                 flags |= extent_op->flags_to_set;
2078                 btrfs_set_extent_flags(leaf, ei, flags);
2079         }
2080
2081         if (extent_op->update_key) {
2082                 struct btrfs_tree_block_info *bi;
2083                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2084                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2085                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2086         }
2087 }
2088
2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2090                                  struct btrfs_root *root,
2091                                  struct btrfs_delayed_ref_node *node,
2092                                  struct btrfs_delayed_extent_op *extent_op)
2093 {
2094         struct btrfs_key key;
2095         struct btrfs_path *path;
2096         struct btrfs_extent_item *ei;
2097         struct extent_buffer *leaf;
2098         u32 item_size;
2099         int ret;
2100         int err = 0;
2101         int metadata = !extent_op->is_data;
2102
2103         if (trans->aborted)
2104                 return 0;
2105
2106         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2107                 metadata = 0;
2108
2109         path = btrfs_alloc_path();
2110         if (!path)
2111                 return -ENOMEM;
2112
2113         key.objectid = node->bytenr;
2114
2115         if (metadata) {
2116                 key.type = BTRFS_METADATA_ITEM_KEY;
2117                 key.offset = extent_op->level;
2118         } else {
2119                 key.type = BTRFS_EXTENT_ITEM_KEY;
2120                 key.offset = node->num_bytes;
2121         }
2122
2123 again:
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2127                                 path, 0, 1);
2128         if (ret < 0) {
2129                 err = ret;
2130                 goto out;
2131         }
2132         if (ret > 0) {
2133                 if (metadata) {
2134                         if (path->slots[0] > 0) {
2135                                 path->slots[0]--;
2136                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2137                                                       path->slots[0]);
2138                                 if (key.objectid == node->bytenr &&
2139                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2140                                     key.offset == node->num_bytes)
2141                                         ret = 0;
2142                         }
2143                         if (ret > 0) {
2144                                 btrfs_release_path(path);
2145                                 metadata = 0;
2146
2147                                 key.objectid = node->bytenr;
2148                                 key.offset = node->num_bytes;
2149                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2150                                 goto again;
2151                         }
2152                 } else {
2153                         err = -EIO;
2154                         goto out;
2155                 }
2156         }
2157
2158         leaf = path->nodes[0];
2159         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161         if (item_size < sizeof(*ei)) {
2162                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2163                                              path, (u64)-1, 0);
2164                 if (ret < 0) {
2165                         err = ret;
2166                         goto out;
2167                 }
2168                 leaf = path->nodes[0];
2169                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170         }
2171 #endif
2172         BUG_ON(item_size < sizeof(*ei));
2173         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2174         __run_delayed_extent_op(extent_op, leaf, ei);
2175
2176         btrfs_mark_buffer_dirty(leaf);
2177 out:
2178         btrfs_free_path(path);
2179         return err;
2180 }
2181
2182 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2183                                 struct btrfs_root *root,
2184                                 struct btrfs_delayed_ref_node *node,
2185                                 struct btrfs_delayed_extent_op *extent_op,
2186                                 int insert_reserved)
2187 {
2188         int ret = 0;
2189         struct btrfs_delayed_tree_ref *ref;
2190         struct btrfs_key ins;
2191         u64 parent = 0;
2192         u64 ref_root = 0;
2193         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2194                                                  SKINNY_METADATA);
2195
2196         ref = btrfs_delayed_node_to_tree_ref(node);
2197         trace_run_delayed_tree_ref(node, ref, node->action);
2198
2199         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2200                 parent = ref->parent;
2201         else
2202                 ref_root = ref->root;
2203
2204         ins.objectid = node->bytenr;
2205         if (skinny_metadata) {
2206                 ins.offset = ref->level;
2207                 ins.type = BTRFS_METADATA_ITEM_KEY;
2208         } else {
2209                 ins.offset = node->num_bytes;
2210                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2211         }
2212
2213         BUG_ON(node->ref_mod != 1);
2214         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2215                 BUG_ON(!extent_op || !extent_op->update_flags);
2216                 ret = alloc_reserved_tree_block(trans, root,
2217                                                 parent, ref_root,
2218                                                 extent_op->flags_to_set,
2219                                                 &extent_op->key,
2220                                                 ref->level, &ins);
2221         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2222                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2223                                              node->num_bytes, parent, ref_root,
2224                                              ref->level, 0, 1, extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2227                                           node->num_bytes, parent, ref_root,
2228                                           ref->level, 0, 1, extent_op);
2229         } else {
2230                 BUG();
2231         }
2232         return ret;
2233 }
2234
2235 /* helper function to actually process a single delayed ref entry */
2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2237                                struct btrfs_root *root,
2238                                struct btrfs_delayed_ref_node *node,
2239                                struct btrfs_delayed_extent_op *extent_op,
2240                                int insert_reserved)
2241 {
2242         int ret = 0;
2243
2244         if (trans->aborted) {
2245                 if (insert_reserved)
2246                         btrfs_pin_extent(root, node->bytenr,
2247                                          node->num_bytes, 1);
2248                 return 0;
2249         }
2250
2251         if (btrfs_delayed_ref_is_head(node)) {
2252                 struct btrfs_delayed_ref_head *head;
2253                 /*
2254                  * We've hit the end of the chain and we were supposed
2255                  * to insert this extent into the tree.  But it got
2256                  * deleted before we ever needed to insert it, so all
2257                  * we have to do is clean up the accounting.
2258                  */
2259                 BUG_ON(extent_op);
2260                 head = btrfs_delayed_node_to_head(node);
2261                 trace_run_delayed_ref_head(node, head, node->action);
2262
2263                 if (insert_reserved) {
2264                         btrfs_pin_extent(root, node->bytenr,
2265                                          node->num_bytes, 1);
2266                         if (head->is_data) {
2267                                 ret = btrfs_del_csums(trans, root,
2268                                                       node->bytenr,
2269                                                       node->num_bytes);
2270                         }
2271                 }
2272                 return ret;
2273         }
2274
2275         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2276             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2277                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2278                                            insert_reserved);
2279         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2280                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2281                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2282                                            insert_reserved);
2283         else
2284                 BUG();
2285         return ret;
2286 }
2287
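/*
 * Example of why ADD refs are preferred below: if a head carries a DROP
 * followed by a re-ADD (e.g. relocation dropping an implicit ref that
 * is added right back), running the DROP first could take the count to
 * zero and free the extent while a reference is still pending.
 */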
2288 static noinline struct btrfs_delayed_ref_node *
2289 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2290 {
2291         struct rb_node *node;
2292         struct btrfs_delayed_ref_node *ref, *last = NULL;
2293
2294         /*
2295          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2296          * This prevents the ref count from going down to zero while
2297          * there are still pending delayed refs.
2298          */
2299         node = rb_first(&head->ref_root);
2300         while (node) {
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2304                         return ref;
2305                 else if (last == NULL)
2306                         last = ref;
2307                 node = rb_next(node);
2308         }
2309         return last;
2310 }
2311
2312 /*
2313  * Returns 0 on success or if called with an already aborted transaction.
2314  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2315  */
2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2317                                              struct btrfs_root *root,
2318                                              unsigned long nr)
2319 {
2320         struct btrfs_delayed_ref_root *delayed_refs;
2321         struct btrfs_delayed_ref_node *ref;
2322         struct btrfs_delayed_ref_head *locked_ref = NULL;
2323         struct btrfs_delayed_extent_op *extent_op;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325         ktime_t start = ktime_get();
2326         int ret;
2327         unsigned long count = 0;
2328         unsigned long actual_count = 0;
2329         int must_insert_reserved = 0;
2330
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         while (1) {
2333                 if (!locked_ref) {
2334                         if (count >= nr)
2335                                 break;
2336
2337                         spin_lock(&delayed_refs->lock);
2338                         locked_ref = btrfs_select_ref_head(trans);
2339                         if (!locked_ref) {
2340                                 spin_unlock(&delayed_refs->lock);
2341                                 break;
2342                         }
2343
2344                         /* grab the lock that says we are going to process
2345                          * all the refs for this head */
2346                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2347                         spin_unlock(&delayed_refs->lock);
2348                         /*
2349                          * we may have dropped the spin lock to get the head
2350                          * mutex lock, and that might have given someone else
2351                          * time to free the head.  If that's true, it has been
2352                          * removed from our list and we can move on.
2353                          */
2354                         if (ret == -EAGAIN) {
2355                                 locked_ref = NULL;
2356                                 count++;
2357                                 continue;
2358                         }
2359                 }
2360
2361                 /*
2362                  * We need to try and merge add/drops of the same ref since we
2363                  * can run into issues with relocate dropping the implicit ref
2364                  * and then it being added back again before the drop can
2365                  * finish.  If we merged anything we need to re-loop so we can
2366                  * get a good ref.
2367                  */
2368                 spin_lock(&locked_ref->lock);
2369                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2370                                          locked_ref);
2371
2372                 /*
2373                  * locked_ref is the head node, so we have to go one
2374                  * node back for any delayed ref updates
2375                  */
2376                 ref = select_delayed_ref(locked_ref);
2377
2378                 if (ref && ref->seq &&
2379                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2380                         spin_unlock(&locked_ref->lock);
2381                         btrfs_delayed_ref_unlock(locked_ref);
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref->processing = 0;
2384                         delayed_refs->num_heads_ready++;
2385                         spin_unlock(&delayed_refs->lock);
2386                         locked_ref = NULL;
2387                         cond_resched();
2388                         count++;
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
2405                         /* All delayed refs have been processed; go ahead
2406                          * and send the head node to run_one_delayed_ref,
2407                          * so that any accounting fixes can happen.
2408                          */
2409                         ref = &locked_ref->node;
2410
2411                         if (extent_op && must_insert_reserved) {
2412                                 btrfs_free_delayed_extent_op(extent_op);
2413                                 extent_op = NULL;
2414                         }
2415
2416                         if (extent_op) {
2417                                 spin_unlock(&locked_ref->lock);
2418                                 ret = run_delayed_extent_op(trans, root,
2419                                                             ref, extent_op);
2420                                 btrfs_free_delayed_extent_op(extent_op);
2421
2422                                 if (ret) {
2423                                         /*
2424                                          * Need to reset must_insert_reserved if
2425                                          * there was an error so the abort stuff
2426                                          * can cleanup the reserved space
2427                                          * properly.
2428                                          */
2429                                         if (must_insert_reserved)
2430                                                 locked_ref->must_insert_reserved = 1;
2431                                         locked_ref->processing = 0;
2432                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2433                                         btrfs_delayed_ref_unlock(locked_ref);
2434                                         return ret;
2435                                 }
2436                                 continue;
2437                         }
2438
2439                         /*
2440                          * Need to drop our head ref lock and re-acquire the
2441                          * delayed ref lock and then re-check to make sure
2442                          * nothing got added.
2443                          */
2444                         spin_unlock(&locked_ref->lock);
2445                         spin_lock(&delayed_refs->lock);
2446                         spin_lock(&locked_ref->lock);
2447                         if (rb_first(&locked_ref->ref_root) ||
2448                             locked_ref->extent_op) {
2449                                 spin_unlock(&locked_ref->lock);
2450                                 spin_unlock(&delayed_refs->lock);
2451                                 continue;
2452                         }
2453                         ref->in_tree = 0;
2454                         delayed_refs->num_heads--;
2455                         rb_erase(&locked_ref->href_node,
2456                                  &delayed_refs->href_root);
2457                         spin_unlock(&delayed_refs->lock);
2458                 } else {
2459                         actual_count++;
2460                         ref->in_tree = 0;
2461                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2462                 }
2463                 atomic_dec(&delayed_refs->num_entries);
2464
2465                 if (!btrfs_delayed_ref_is_head(ref)) {
2466                         /*
2467                          * when we play the delayed ref, also correct the
2468                          * ref_mod on head
2469                          */
2470                         switch (ref->action) {
2471                         case BTRFS_ADD_DELAYED_REF:
2472                         case BTRFS_ADD_DELAYED_EXTENT:
2473                                 locked_ref->node.ref_mod -= ref->ref_mod;
2474                                 break;
2475                         case BTRFS_DROP_DELAYED_REF:
2476                                 locked_ref->node.ref_mod += ref->ref_mod;
2477                                 break;
2478                         default:
2479                                 WARN_ON(1);
2480                         }
2481                 }
2482                 spin_unlock(&locked_ref->lock);
2483
2484                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2485                                           must_insert_reserved);
2486
2487                 btrfs_free_delayed_extent_op(extent_op);
2488                 if (ret) {
2489                         locked_ref->processing = 0;
2490                         btrfs_delayed_ref_unlock(locked_ref);
2491                         btrfs_put_delayed_ref(ref);
2492                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2493                         return ret;
2494                 }
2495
2496                 /*
2497                  * If this node is a head, that means all the refs in this head
2498                  * have been dealt with, and we will pick the next head to deal
2499                  * with, so we must unlock the head and drop it from the cluster
2500                  * list before we release it.
2501                  */
2502                 if (btrfs_delayed_ref_is_head(ref)) {
2503                         btrfs_delayed_ref_unlock(locked_ref);
2504                         locked_ref = NULL;
2505                 }
2506                 btrfs_put_delayed_ref(ref);
2507                 count++;
2508                 cond_resched();
2509         }
2510
2511         /*
2512          * We don't want to include ref heads since we can have empty ref heads
2513          * and those will drastically skew our runtime down since we just do
2514          * accounting, no actual extent tree updates.
2515          */
2516         if (actual_count > 0) {
2517                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2518                 u64 avg;
2519
2520                 /*
2521                  * We weigh the current average higher than our current runtime
2522                  * to avoid large swings in the average.
2523                  */
2524                 spin_lock(&delayed_refs->lock);
2525                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2526                 avg = div64_u64(avg, 4);
2527                 fs_info->avg_delayed_ref_runtime = avg;
2528                 spin_unlock(&delayed_refs->lock);
2529         }
2530         return 0;
2531 }
2532
2533 #ifdef SCRAMBLE_DELAYED_REFS
2534 /*
2535  * Normally delayed refs get processed in ascending bytenr order. This
2536  * correlates in most cases to the order added. To expose dependencies on this
2537  * order, we start to process the tree in the middle instead of the beginning.
2538  */
2539 static u64 find_middle(struct rb_root *root)
2540 {
2541         struct rb_node *n = root->rb_node;
2542         struct btrfs_delayed_ref_node *entry;
2543         int alt = 1;
2544         u64 middle;
2545         u64 first = 0, last = 0;
2546
2547         n = rb_first(root);
2548         if (n) {
2549                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2550                 first = entry->bytenr;
2551         }
2552         n = rb_last(root);
2553         if (n) {
2554                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2555                 last = entry->bytenr;
2556         }
2557         n = root->rb_node;
2558
2559         while (n) {
2560                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2561                 WARN_ON(!entry->in_tree);
2562
2563                 middle = entry->bytenr;
2564
2565                 if (alt)
2566                         n = n->rb_left;
2567                 else
2568                         n = n->rb_right;
2569
2570                 alt = 1 - alt;
2571         }
2572         return middle;
2573 }
2574 #endif
2575
2576 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2577                                          struct btrfs_fs_info *fs_info)
2578 {
2579         struct qgroup_update *qgroup_update;
2580         int ret = 0;
2581
2582         if (list_empty(&trans->qgroup_ref_list) !=
2583             !trans->delayed_ref_elem.seq) {
2584                 /* list without seq or seq without list */
2585                 btrfs_err(fs_info,
2586                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2587                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2588                         (u32)(trans->delayed_ref_elem.seq >> 32),
2589                         (u32)trans->delayed_ref_elem.seq);
2590                 BUG();
2591         }
2592
2593         if (!trans->delayed_ref_elem.seq)
2594                 return 0;
2595
2596         while (!list_empty(&trans->qgroup_ref_list)) {
2597                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2598                                                  struct qgroup_update, list);
2599                 list_del(&qgroup_update->list);
2600                 if (!ret)
2601                         ret = btrfs_qgroup_account_ref(
2602                                         trans, fs_info, qgroup_update->node,
2603                                         qgroup_update->extent_op);
2604                 kfree(qgroup_update);
2605         }
2606
2607         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2608
2609         return ret;
2610 }
2611
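/*
 * For a rough feel of the numbers below (sizes from the current disk
 * format, for illustration only): a btrfs_extent_item is 24 bytes and
 * an inline ref 9 bytes, so a head costs ~33 bytes with skinny metadata
 * and ~51 bytes with the extra btrfs_tree_block_info.
 */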
2612 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2613 {
2614         u64 num_bytes;
2615
2616         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2617                              sizeof(struct btrfs_extent_inline_ref));
2618         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2619                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2620
2621         /*
2622          * We don't ever fill up leaves all the way, so callers double this
2623          * estimate to get closer to what we're really going to want to use.
2624          */
2625         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2626 }
2627
2628 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2629                                        struct btrfs_root *root)
2630 {
2631         struct btrfs_block_rsv *global_rsv;
2632         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2633         u64 num_bytes;
2634         int ret = 0;
2635
2636         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2637         num_heads = heads_to_leaves(root, num_heads);
2638         if (num_heads > 1)
2639                 num_bytes += (num_heads - 1) * root->leafsize;
2640         num_bytes <<= 1;
2641         global_rsv = &root->fs_info->global_block_rsv;
2642
2643         /*
2644          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2645          * wiggle room since running delayed refs can create more delayed refs.
2646          */
2647         if (global_rsv->space_info->full)
2648                 num_bytes <<= 1;
2649
2650         spin_lock(&global_rsv->lock);
2651         if (global_rsv->reserved <= num_bytes)
2652                 ret = 1;
2653         spin_unlock(&global_rsv->lock);
2654         return ret;
2655 }
2656
2657 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2658                                        struct btrfs_root *root)
2659 {
2660         struct btrfs_fs_info *fs_info = root->fs_info;
2661         u64 num_entries =
2662                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2663         u64 avg_runtime;
2664
2665         smp_mb();
2666         avg_runtime = fs_info->avg_delayed_ref_runtime;
2667         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2668                 return 1;
2669
2670         return btrfs_check_space_for_delayed_refs(trans, root);
2671 }
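     /*
      * Illustrative numbers (an assumed average, not a measurement): at
      * avg_delayed_ref_runtime == 100us per ref, num_entries * avg
      * crosses NSEC_PER_SEC once ~10,000 refs are queued, i.e. we start
      * throttling when the backlog represents about a second of work.
      */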
2672
2673 /*
2674  * this starts processing the delayed reference count updates and
2675  * extent insertions we have queued up so far.  count can be
2676  * 0, which means to process everything in the tree at the start
2677  * of the run (but not newly added entries), or it can be some target
2678  * number you'd like to process.
2679  *
2680  * Returns 0 on success or if called with an aborted transaction
2681  * Returns <0 on error and aborts the transaction
2682  */
2683 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2684                            struct btrfs_root *root, unsigned long count)
2685 {
2686         struct rb_node *node;
2687         struct btrfs_delayed_ref_root *delayed_refs;
2688         struct btrfs_delayed_ref_head *head;
2689         int ret;
2690         int run_all = count == (unsigned long)-1;
2691         int run_most = 0;
2692
2693         /* We'll clean this up in btrfs_cleanup_transaction */
2694         if (trans->aborted)
2695                 return 0;
2696
2697         if (root == root->fs_info->extent_root)
2698                 root = root->fs_info->tree_root;
2699
2700         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2701
2702         delayed_refs = &trans->transaction->delayed_refs;
2703         if (count == 0) {
2704                 count = atomic_read(&delayed_refs->num_entries) * 2;
2705                 run_most = 1;
2706         }
2707
2708 again:
2709 #ifdef SCRAMBLE_DELAYED_REFS
2710         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2711 #endif
2712         ret = __btrfs_run_delayed_refs(trans, root, count);
2713         if (ret < 0) {
2714                 btrfs_abort_transaction(trans, root, ret);
2715                 return ret;
2716         }
2717
2718         if (run_all) {
2719                 if (!list_empty(&trans->new_bgs))
2720                         btrfs_create_pending_block_groups(trans, root);
2721
2722                 spin_lock(&delayed_refs->lock);
2723                 node = rb_first(&delayed_refs->href_root);
2724                 if (!node) {
2725                         spin_unlock(&delayed_refs->lock);
2726                         goto out;
2727                 }
2728                 count = (unsigned long)-1;
2729
2730                 while (node) {
2731                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2732                                         href_node);
2733                         if (btrfs_delayed_ref_is_head(&head->node)) {
2734                                 struct btrfs_delayed_ref_node *ref;
2735
2736                                 ref = &head->node;
2737                                 atomic_inc(&ref->refs);
2738
2739                                 spin_unlock(&delayed_refs->lock);
2740                                 /*
2741                                  * Mutex was contended, block until it's
2742                                  * released and try again
2743                                  */
2744                                 mutex_lock(&head->mutex);
2745                                 mutex_unlock(&head->mutex);
2746
2747                                 btrfs_put_delayed_ref(ref);
2748                                 cond_resched();
2749                                 goto again;
2750                         } else {
2751                                 WARN_ON(1);
2752                         }
2753                         node = rb_next(node);
2754                 }
2755                 spin_unlock(&delayed_refs->lock);
2756                 cond_resched();
2757                 goto again;
2758         }
2759 out:
2760         assert_qgroups_uptodate(trans);
2761         return 0;
2762 }
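     /*
      * Typical use (as in btrfs_write_dirty_block_groups() below): pass
      * (unsigned long)-1 to drain every queued ref before relying on an
      * up-to-date extent tree:
      *
      *	err = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
      */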
2763
2764 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2765                                 struct btrfs_root *root,
2766                                 u64 bytenr, u64 num_bytes, u64 flags,
2767                                 int level, int is_data)
2768 {
2769         struct btrfs_delayed_extent_op *extent_op;
2770         int ret;
2771
2772         extent_op = btrfs_alloc_delayed_extent_op();
2773         if (!extent_op)
2774                 return -ENOMEM;
2775
2776         extent_op->flags_to_set = flags;
2777         extent_op->update_flags = 1;
2778         extent_op->update_key = 0;
2779         extent_op->is_data = is_data ? 1 : 0;
2780         extent_op->level = level;
2781
2782         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2783                                           num_bytes, extent_op);
2784         if (ret)
2785                 btrfs_free_delayed_extent_op(extent_op);
2786         return ret;
2787 }
2788
2789 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2790                                       struct btrfs_root *root,
2791                                       struct btrfs_path *path,
2792                                       u64 objectid, u64 offset, u64 bytenr)
2793 {
2794         struct btrfs_delayed_ref_head *head;
2795         struct btrfs_delayed_ref_node *ref;
2796         struct btrfs_delayed_data_ref *data_ref;
2797         struct btrfs_delayed_ref_root *delayed_refs;
2798         struct rb_node *node;
2799         int ret = 0;
2800
2801         delayed_refs = &trans->transaction->delayed_refs;
2802         spin_lock(&delayed_refs->lock);
2803         head = btrfs_find_delayed_ref_head(trans, bytenr);
2804         if (!head) {
2805                 spin_unlock(&delayed_refs->lock);
2806                 return 0;
2807         }
2808
2809         if (!mutex_trylock(&head->mutex)) {
2810                 atomic_inc(&head->node.refs);
2811                 spin_unlock(&delayed_refs->lock);
2812
2813                 btrfs_release_path(path);
2814
2815                 /*
2816                  * Mutex was contended, block until it's released and let
2817                  * caller try again
2818                  */
2819                 mutex_lock(&head->mutex);
2820                 mutex_unlock(&head->mutex);
2821                 btrfs_put_delayed_ref(&head->node);
2822                 return -EAGAIN;
2823         }
2824         spin_unlock(&delayed_refs->lock);
2825
2826         spin_lock(&head->lock);
2827         node = rb_first(&head->ref_root);
2828         while (node) {
2829                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2830                 node = rb_next(node);
2831
2832                 /* If it's a shared ref we know a cross reference exists */
2833                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2834                         ret = 1;
2835                         break;
2836                 }
2837
2838                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2839
2840                 /*
2841                  * If our ref doesn't match the one we're currently looking at
2842                  * then we have a cross reference.
2843                  */
2844                 if (data_ref->root != root->root_key.objectid ||
2845                     data_ref->objectid != objectid ||
2846                     data_ref->offset != offset) {
2847                         ret = 1;
2848                         break;
2849                 }
2850         }
2851         spin_unlock(&head->lock);
2852         mutex_unlock(&head->mutex);
2853         return ret;
2854 }
2855
2856 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2857                                         struct btrfs_root *root,
2858                                         struct btrfs_path *path,
2859                                         u64 objectid, u64 offset, u64 bytenr)
2860 {
2861         struct btrfs_root *extent_root = root->fs_info->extent_root;
2862         struct extent_buffer *leaf;
2863         struct btrfs_extent_data_ref *ref;
2864         struct btrfs_extent_inline_ref *iref;
2865         struct btrfs_extent_item *ei;
2866         struct btrfs_key key;
2867         u32 item_size;
2868         int ret;
2869
2870         key.objectid = bytenr;
2871         key.offset = (u64)-1;
2872         key.type = BTRFS_EXTENT_ITEM_KEY;
2873
2874         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2875         if (ret < 0)
2876                 goto out;
2877         BUG_ON(ret == 0); /* Corruption */
2878
2879         ret = -ENOENT;
2880         if (path->slots[0] == 0)
2881                 goto out;
2882
2883         path->slots[0]--;
2884         leaf = path->nodes[0];
2885         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2886
2887         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2888                 goto out;
2889
2890         ret = 1;
2891         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2892 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2893         if (item_size < sizeof(*ei)) {
2894                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2895                 goto out;
2896         }
2897 #endif
2898         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2899
2900         if (item_size != sizeof(*ei) +
2901             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2902                 goto out;
2903
2904         if (btrfs_extent_generation(leaf, ei) <=
2905             btrfs_root_last_snapshot(&root->root_item))
2906                 goto out;
2907
2908         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2909         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2910             BTRFS_EXTENT_DATA_REF_KEY)
2911                 goto out;
2912
2913         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2914         if (btrfs_extent_refs(leaf, ei) !=
2915             btrfs_extent_data_ref_count(leaf, ref) ||
2916             btrfs_extent_data_ref_root(leaf, ref) !=
2917             root->root_key.objectid ||
2918             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2919             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2920                 goto out;
2921
2922         ret = 0;
2923 out:
2924         return ret;
2925 }
2926
2927 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2928                           struct btrfs_root *root,
2929                           u64 objectid, u64 offset, u64 bytenr)
2930 {
2931         struct btrfs_path *path;
2932         int ret;
2933         int ret2;
2934
2935         path = btrfs_alloc_path();
2936         if (!path)
2937                 return -ENOMEM;
2938
2939         do {
2940                 ret = check_committed_ref(trans, root, path, objectid,
2941                                           offset, bytenr);
2942                 if (ret && ret != -ENOENT)
2943                         goto out;
2944
2945                 ret2 = check_delayed_ref(trans, root, path, objectid,
2946                                          offset, bytenr);
2947         } while (ret2 == -EAGAIN);
2948
2949         if (ret2 && ret2 != -ENOENT) {
2950                 ret = ret2;
2951                 goto out;
2952         }
2953
2954         if (ret != -ENOENT || ret2 != -ENOENT)
2955                 ret = 0;
2956 out:
2957         btrfs_free_path(path);
2958         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2959                 WARN_ON(ret > 0);
2960         return ret;
2961 }
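     /*
      * Hypothetical caller sketch (this mirrors how nocow write decisions
      * use it): any committed or delayed cross reference means the extent
      * cannot simply be overwritten in place:
      *
      *	if (btrfs_cross_ref_exist(trans, root, ino, off, bytenr))
      *		force_cow = 1;
      */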
2962
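     /*
      * Walk every slot in @buf and add (inc != 0) or drop (inc == 0) one
      * reference for whatever it points to: file extent items in a leaf,
      * child block pointers in a node.  Inline extents and holes
      * (disk_bytenr == 0) have no backing extent item and are skipped.
      */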
2963 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2964                            struct btrfs_root *root,
2965                            struct extent_buffer *buf,
2966                            int full_backref, int inc, int for_cow)
2967 {
2968         u64 bytenr;
2969         u64 num_bytes;
2970         u64 parent;
2971         u64 ref_root;
2972         u32 nritems;
2973         struct btrfs_key key;
2974         struct btrfs_file_extent_item *fi;
2975         int i;
2976         int level;
2977         int ret = 0;
2978         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2979                             u64, u64, u64, u64, u64, u64, int);
2980
2981         ref_root = btrfs_header_owner(buf);
2982         nritems = btrfs_header_nritems(buf);
2983         level = btrfs_header_level(buf);
2984
2985         if (!root->ref_cows && level == 0)
2986                 return 0;
2987
2988         if (inc)
2989                 process_func = btrfs_inc_extent_ref;
2990         else
2991                 process_func = btrfs_free_extent;
2992
2993         if (full_backref)
2994                 parent = buf->start;
2995         else
2996                 parent = 0;
2997
2998         for (i = 0; i < nritems; i++) {
2999                 if (level == 0) {
3000                         btrfs_item_key_to_cpu(buf, &key, i);
3001                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3002                                 continue;
3003                         fi = btrfs_item_ptr(buf, i,
3004                                             struct btrfs_file_extent_item);
3005                         if (btrfs_file_extent_type(buf, fi) ==
3006                             BTRFS_FILE_EXTENT_INLINE)
3007                                 continue;
3008                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3009                         if (bytenr == 0)
3010                                 continue;
3011
3012                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3013                         key.offset -= btrfs_file_extent_offset(buf, fi);
3014                         ret = process_func(trans, root, bytenr, num_bytes,
3015                                            parent, ref_root, key.objectid,
3016                                            key.offset, for_cow);
3017                         if (ret)
3018                                 goto fail;
3019                 } else {
3020                         bytenr = btrfs_node_blockptr(buf, i);
3021                         num_bytes = btrfs_level_size(root, level - 1);
3022                         ret = process_func(trans, root, bytenr, num_bytes,
3023                                            parent, ref_root, level - 1, 0,
3024                                            for_cow);
3025                         if (ret)
3026                                 goto fail;
3027                 }
3028         }
3029         return 0;
3030 fail:
3031         return ret;
3032 }
3033
3034 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3035                   struct extent_buffer *buf, int full_backref, int for_cow)
3036 {
3037         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3038 }
3039
3040 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3041                   struct extent_buffer *buf, int full_backref, int for_cow)
3042 {
3043         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3044 }
3045
3046 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3047                                  struct btrfs_root *root,
3048                                  struct btrfs_path *path,
3049                                  struct btrfs_block_group_cache *cache)
3050 {
3051         int ret;
3052         struct btrfs_root *extent_root = root->fs_info->extent_root;
3053         unsigned long bi;
3054         struct extent_buffer *leaf;
3055
3056         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3057         if (ret < 0)
3058                 goto fail;
3059         BUG_ON(ret); /* Corruption */
3060
3061         leaf = path->nodes[0];
3062         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3063         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3064         btrfs_mark_buffer_dirty(leaf);
3065         btrfs_release_path(path);
3066 fail:
3067         if (ret) {
3068                 btrfs_abort_transaction(trans, root, ret);
3069                 return ret;
3070         }
3071         return 0;
3072
3073 }
3074
3075 static struct btrfs_block_group_cache *
3076 next_block_group(struct btrfs_root *root,
3077                  struct btrfs_block_group_cache *cache)
3078 {
3079         struct rb_node *node;
3080         spin_lock(&root->fs_info->block_group_cache_lock);
3081         node = rb_next(&cache->cache_node);
3082         btrfs_put_block_group(cache);
3083         if (node) {
3084                 cache = rb_entry(node, struct btrfs_block_group_cache,
3085                                  cache_node);
3086                 btrfs_get_block_group(cache);
3087         } else
3088                 cache = NULL;
3089         spin_unlock(&root->fs_info->block_group_cache_lock);
3090         return cache;
3091 }
3092
3093 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3094                             struct btrfs_trans_handle *trans,
3095                             struct btrfs_path *path)
3096 {
3097         struct btrfs_root *root = block_group->fs_info->tree_root;
3098         struct inode *inode = NULL;
3099         u64 alloc_hint = 0;
3100         int dcs = BTRFS_DC_ERROR;
3101         int num_pages = 0;
3102         int retries = 0;
3103         int ret = 0;
3104
3105         /*
3106          * If this block group is smaller than 100 megs, don't bother caching
3107          * it.
3108          */
3109         if (block_group->key.offset < (100 * 1024 * 1024)) {
3110                 spin_lock(&block_group->lock);
3111                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3112                 spin_unlock(&block_group->lock);
3113                 return 0;
3114         }
3115
3116 again:
3117         inode = lookup_free_space_inode(root, block_group, path);
3118         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3119                 ret = PTR_ERR(inode);
3120                 btrfs_release_path(path);
3121                 goto out;
3122         }
3123
3124         if (IS_ERR(inode)) {
3125                 BUG_ON(retries);
3126                 retries++;
3127
3128                 if (block_group->ro)
3129                         goto out_free;
3130
3131                 ret = create_free_space_inode(root, trans, block_group, path);
3132                 if (ret)
3133                         goto out_free;
3134                 goto again;
3135         }
3136
3137         /* We've already set up this transaction, go ahead and exit */
3138         if (block_group->cache_generation == trans->transid &&
3139             i_size_read(inode)) {
3140                 dcs = BTRFS_DC_SETUP;
3141                 goto out_put;
3142         }
3143
3144         /*
3145          * We want to set the generation to 0, that way if anything goes wrong
3146          * from here on out we know not to trust this cache when we load up next
3147          * time.
3148          */
3149         BTRFS_I(inode)->generation = 0;
3150         ret = btrfs_update_inode(trans, root, inode);
3151         WARN_ON(ret);
3152
3153         if (i_size_read(inode) > 0) {
3154                 ret = btrfs_check_trunc_cache_free_space(root,
3155                                         &root->fs_info->global_block_rsv);
3156                 if (ret)
3157                         goto out_put;
3158
3159                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3160                 if (ret)
3161                         goto out_put;
3162         }
3163
3164         spin_lock(&block_group->lock);
3165         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3166             !btrfs_test_opt(root, SPACE_CACHE)) {
3167                 /*
3168                  * don't bother trying to write stuff out _if_
3169                  * a) we're not cached,
3170                  * b) we're mounted with the nospace_cache option.
3171                  */
3172                 dcs = BTRFS_DC_WRITTEN;
3173                 spin_unlock(&block_group->lock);
3174                 goto out_put;
3175         }
3176         spin_unlock(&block_group->lock);
3177
3178         /*
3179          * Try to preallocate enough space based on how big the block group is.
3180          * Keep in mind this has to include any pinned space which could end up
3181          * taking up quite a bit since it's not folded into the other space
3182          * cache.
3183          */
3184         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3185         if (!num_pages)
3186                 num_pages = 1;
3187
3188         num_pages *= 16;
3189         num_pages *= PAGE_CACHE_SIZE;
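             /*
              * Worked example: a 1GiB block group gives
              * div64_u64(1GiB, 256MiB) = 4, so with the common 4K
              * PAGE_CACHE_SIZE we preallocate 4 * 16 * 4K = 256KiB for
              * its free space cache.
              */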
3190
3191         ret = btrfs_check_data_free_space(inode, num_pages);
3192         if (ret)
3193                 goto out_put;
3194
3195         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3196                                               num_pages, num_pages,
3197                                               &alloc_hint);
3198         if (!ret)
3199                 dcs = BTRFS_DC_SETUP;
3200         btrfs_free_reserved_data_space(inode, num_pages);
3201
3202 out_put:
3203         iput(inode);
3204 out_free:
3205         btrfs_release_path(path);
3206 out:
3207         spin_lock(&block_group->lock);
3208         if (!ret && dcs == BTRFS_DC_SETUP)
3209                 block_group->cache_generation = trans->transid;
3210         block_group->disk_cache_state = dcs;
3211         spin_unlock(&block_group->lock);
3212
3213         return ret;
3214 }
3215
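     /*
      * The three passes below: pass one runs cache_save_setup() on every
      * block group still in BTRFS_DC_CLEAR; pass two writes each dirty
      * block group item and flips DC_SETUP caches to DC_NEED_WRITE; pass
      * three writes out the free space cache itself for every
      * DC_NEED_WRITE group and marks it DC_WRITTEN.  A group turning
      * CLEAR or dirty again mid-walk restarts the walk via the "again"
      * label.
      */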
3216 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3217                                    struct btrfs_root *root)
3218 {
3219         struct btrfs_block_group_cache *cache;
3220         int err = 0;
3221         struct btrfs_path *path;
3222         u64 last = 0;
3223
3224         path = btrfs_alloc_path();
3225         if (!path)
3226                 return -ENOMEM;
3227
3228 again:
3229         while (1) {
3230                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3231                 while (cache) {
3232                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3233                                 break;
3234                         cache = next_block_group(root, cache);
3235                 }
3236                 if (!cache) {
3237                         if (last == 0)
3238                                 break;
3239                         last = 0;
3240                         continue;
3241                 }
3242                 err = cache_save_setup(cache, trans, path);
3243                 last = cache->key.objectid + cache->key.offset;
3244                 btrfs_put_block_group(cache);
3245         }
3246
3247         while (1) {
3248                 if (last == 0) {
3249                         err = btrfs_run_delayed_refs(trans, root,
3250                                                      (unsigned long)-1);
3251                         if (err) /* File system offline */
3252                                 goto out;
3253                 }
3254
3255                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3256                 while (cache) {
3257                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3258                                 btrfs_put_block_group(cache);
3259                                 goto again;
3260                         }
3261
3262                         if (cache->dirty)
3263                                 break;
3264                         cache = next_block_group(root, cache);
3265                 }
3266                 if (!cache) {
3267                         if (last == 0)
3268                                 break;
3269                         last = 0;
3270                         continue;
3271                 }
3272
3273                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3274                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3275                 cache->dirty = 0;
3276                 last = cache->key.objectid + cache->key.offset;
3277
3278                 err = write_one_cache_group(trans, root, path, cache);
3279                 btrfs_put_block_group(cache);
3280                 if (err) /* File system offline */
3281                         goto out;
3282         }
3283
3284         while (1) {
3285                 /*
3286                  * I don't think this is needed since we're just marking our
3287                  * preallocated extent as written, but it can't hurt, just in
3288                  * case.
3289                  */
3290                 if (last == 0) {
3291                         err = btrfs_run_delayed_refs(trans, root,
3292                                                      (unsigned long)-1);
3293                         if (err) /* File system offline */
3294                                 goto out;
3295                 }
3296
3297                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3298                 while (cache) {
3299                         /*
3300                          * Really this shouldn't happen, but it could if we
3301                          * couldn't write the entire preallocated extent and
3302                          * splitting the extent resulted in a new block.
3303                          */
3304                         if (cache->dirty) {
3305                                 btrfs_put_block_group(cache);
3306                                 goto again;
3307                         }
3308                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3309                                 break;
3310                         cache = next_block_group(root, cache);
3311                 }
3312                 if (!cache) {
3313                         if (last == 0)
3314                                 break;
3315                         last = 0;
3316                         continue;
3317                 }
3318
3319                 err = btrfs_write_out_cache(root, trans, cache, path);
3320
3321                 /*
3322                  * If we didn't have an error then the cache state is still
3323                  * NEED_WRITE, so we can set it to WRITTEN.
3324                  */
3325                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3326                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3327                 last = cache->key.objectid + cache->key.offset;
3328                 btrfs_put_block_group(cache);
3329         }
3330 out:
3331
3332         btrfs_free_path(path);
3333         return err;
3334 }
3335
3336 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3337 {
3338         struct btrfs_block_group_cache *block_group;
3339         int readonly = 0;
3340
3341         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3342         if (!block_group || block_group->ro)
3343                 readonly = 1;
3344         if (block_group)
3345                 btrfs_put_block_group(block_group);
3346         return readonly;
3347 }
3348
3349 static const char *alloc_name(u64 flags)
3350 {
3351         switch (flags) {
3352         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3353                 return "mixed";
3354         case BTRFS_BLOCK_GROUP_METADATA:
3355                 return "metadata";
3356         case BTRFS_BLOCK_GROUP_DATA:
3357                 return "data";
3358         case BTRFS_BLOCK_GROUP_SYSTEM:
3359                 return "system";
3360         default:
3361                 WARN_ON(1);
3362                 return "invalid-combination";
3363         }
3364 }
3365
3366 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3367                              u64 total_bytes, u64 bytes_used,
3368                              struct btrfs_space_info **space_info)
3369 {
3370         struct btrfs_space_info *found;
3371         int i;
3372         int factor;
3373         int ret;
3374
3375         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3376                      BTRFS_BLOCK_GROUP_RAID10))
3377                 factor = 2;
3378         else
3379                 factor = 1;
3380
3381         found = __find_space_info(info, flags);
3382         if (found) {
3383                 spin_lock(&found->lock);
3384                 found->total_bytes += total_bytes;
3385                 found->disk_total += total_bytes * factor;
3386                 found->bytes_used += bytes_used;
3387                 found->disk_used += bytes_used * factor;
3388                 found->full = 0;
3389                 spin_unlock(&found->lock);
3390                 *space_info = found;
3391                 return 0;
3392         }
3393         found = kzalloc(sizeof(*found), GFP_NOFS);
3394         if (!found)
3395                 return -ENOMEM;
3396
3397         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3398         if (ret) {
3399                 kfree(found);
3400                 return ret;
3401         }
3402
3403         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3404                 INIT_LIST_HEAD(&found->block_groups[i]);
3405                 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3406         }
3407         init_rwsem(&found->groups_sem);
3408         spin_lock_init(&found->lock);
3409         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3410         found->total_bytes = total_bytes;
3411         found->disk_total = total_bytes * factor;
3412         found->bytes_used = bytes_used;
3413         found->disk_used = bytes_used * factor;
3414         found->bytes_pinned = 0;
3415         found->bytes_reserved = 0;
3416         found->bytes_readonly = 0;
3417         found->bytes_may_use = 0;
3418         found->full = 0;
3419         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3420         found->chunk_alloc = 0;
3421         found->flush = 0;
3422         init_waitqueue_head(&found->wait);
3423
3424         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3425                                     info->space_info_kobj, "%s",
3426                                     alloc_name(found->flags));
3427         if (ret) {
                     percpu_counter_destroy(&found->total_bytes_pinned);
3428                 kfree(found);
3429                 return ret;
3430         }
3431
3432         *space_info = found;
3433         list_add_rcu(&found->list, &info->space_info);
3434         if (flags & BTRFS_BLOCK_GROUP_DATA)
3435                 info->data_sinfo = found;
3436
3437         return ret;
3438 }
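     /*
      * Example of the factor accounting above: adding a 1GiB RAID1 block
      * group bumps total_bytes by 1GiB but disk_total by 2GiB, because a
      * mirrored profile consumes twice the raw disk space (factor == 2).
      */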
3439
3440 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3441 {
3442         u64 extra_flags = chunk_to_extended(flags) &
3443                                 BTRFS_EXTENDED_PROFILE_MASK;
3444
3445         write_seqlock(&fs_info->profiles_lock);
3446         if (flags & BTRFS_BLOCK_GROUP_DATA)
3447                 fs_info->avail_data_alloc_bits |= extra_flags;
3448         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3449                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3450         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3451                 fs_info->avail_system_alloc_bits |= extra_flags;
3452         write_sequnlock(&fs_info->profiles_lock);
3453 }
3454
3455 /*
3456  * returns target flags in extended format or 0 if restripe for this
3457  * chunk_type is not in progress
3458  *
3459  * should be called with either volume_mutex or balance_lock held
3460  */
3461 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3462 {
3463         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3464         u64 target = 0;
3465
3466         if (!bctl)
3467                 return 0;
3468
3469         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3470             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3471                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3472         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3473                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3474                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3475         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3476                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3477                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3478         }
3479
3480         return target;
3481 }
3482
3483 /*
3484  * @flags: available profiles in extended format (see ctree.h)
3485  *
3486  * Returns reduced profile in chunk format.  If profile changing is in
3487  * progress (either running or paused) picks the target profile (if it's
3488  * already available), otherwise falls back to plain reducing.
3489  */
3490 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3491 {
3492         /*
3493          * we add in the count of missing devices because we want
3494          * to make sure that any RAID levels on a degraded FS
3495          * continue to be honored.
3496          */
3497         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3498                 root->fs_info->fs_devices->missing_devices;
3499         u64 target;
3500         u64 tmp;
3501
3502         /*
3503          * see if restripe for this chunk_type is in progress, if so
3504          * try to reduce to the target profile
3505          */
3506         spin_lock(&root->fs_info->balance_lock);
3507         target = get_restripe_target(root->fs_info, flags);
3508         if (target) {
3509                 /* pick target profile only if it's already available */
3510                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3511                         spin_unlock(&root->fs_info->balance_lock);
3512                         return extended_to_chunk(target);
3513                 }
3514         }
3515         spin_unlock(&root->fs_info->balance_lock);
3516
3517         /* First, mask out the RAID levels which aren't possible */
3518         if (num_devices == 1)
3519                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3520                            BTRFS_BLOCK_GROUP_RAID5);
3521         if (num_devices < 3)
3522                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3523         if (num_devices < 4)
3524                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3525
3526         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3527                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3528                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3529         flags &= ~tmp;
3530
3531         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3532                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3533         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3534                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3535         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3536                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3537         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3538                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3539         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3540                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3541
3542         return extended_to_chunk(flags | tmp);
3543 }
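     /*
      * Reduction example: with two rw devices and RAID10|RAID1|RAID0 all
      * set in @flags, RAID10 is masked out (it needs at least four
      * devices) and the preference chain above picks RAID1, so the
      * result is the RAID1 bit converted back to chunk format.
      */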
3544
3545 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3546 {
3547         unsigned seq;
3548
3549         do {
3550                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3551
3552                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3553                         flags |= root->fs_info->avail_data_alloc_bits;
3554                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3555                         flags |= root->fs_info->avail_system_alloc_bits;
3556                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3557                         flags |= root->fs_info->avail_metadata_alloc_bits;
3558         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3559
3560         return btrfs_reduce_alloc_profile(root, flags);
3561 }
3562
3563 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3564 {
3565         u64 flags;
3566         u64 ret;
3567
3568         if (data)
3569                 flags = BTRFS_BLOCK_GROUP_DATA;
3570         else if (root == root->fs_info->chunk_root)
3571                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3572         else
3573                 flags = BTRFS_BLOCK_GROUP_METADATA;
3574
3575         ret = get_alloc_profile(root, flags);
3576         return ret;
3577 }
3578
3579 /*
3580  * This will check the space that the inode allocates from to make sure we have
3581  * enough space for bytes.
3582  */
3583 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3584 {
3585         struct btrfs_space_info *data_sinfo;
3586         struct btrfs_root *root = BTRFS_I(inode)->root;
3587         struct btrfs_fs_info *fs_info = root->fs_info;
3588         u64 used;
3589         int ret = 0, committed = 0, alloc_chunk = 1;
3590
3591         /* make sure bytes are sectorsize aligned */
3592         bytes = ALIGN(bytes, root->sectorsize);
3593
3594         if (btrfs_is_free_space_inode(inode)) {
3595                 committed = 1;
3596                 ASSERT(current->journal_info);
3597         }
3598
3599         data_sinfo = fs_info->data_sinfo;
3600         if (!data_sinfo)
3601                 goto alloc;
3602
3603 again:
3604         /* make sure we have enough space to handle the data first */
3605         spin_lock(&data_sinfo->lock);
3606         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3607                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3608                 data_sinfo->bytes_may_use;
3609
3610         if (used + bytes > data_sinfo->total_bytes) {
3611                 struct btrfs_trans_handle *trans;
3612
3613                 /*
3614                  * if we don't have enough free bytes in this space then we need
3615                  * to alloc a new chunk.
3616                  */
3617                 if (!data_sinfo->full && alloc_chunk) {
3618                         u64 alloc_target;
3619
3620                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3621                         spin_unlock(&data_sinfo->lock);
3622 alloc:
3623                         alloc_target = btrfs_get_alloc_profile(root, 1);
3624                         /*
3625                          * It is ugly that we don't call a nolock join
3626                          * transaction for the free space inode case here.
3627                          * But it is safe because we only do the data space
3628                          * reservation for the free space cache in the
3629                          * transaction context; the common join transaction
3630                          * just increases the use count of the current
3631                          * transaction handle and doesn't try to acquire the
3632                          * trans_lock of the fs.
3633                          */
3634                         trans = btrfs_join_transaction(root);
3635                         if (IS_ERR(trans))
3636                                 return PTR_ERR(trans);
3637
3638                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3639                                              alloc_target,
3640                                              CHUNK_ALLOC_NO_FORCE);
3641                         btrfs_end_transaction(trans, root);
3642                         if (ret < 0) {
3643                                 if (ret != -ENOSPC)
3644                                         return ret;
3645                                 else
3646                                         goto commit_trans;
3647                         }
3648
3649                         if (!data_sinfo)
3650                                 data_sinfo = fs_info->data_sinfo;
3651
3652                         goto again;
3653                 }
3654
3655                 /*
3656                  * If we don't have enough pinned space to deal with this
3657                  * allocation don't bother committing the transaction.
3658                  */
3659                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3660                                            bytes) < 0)
3661                         committed = 1;
3662                 spin_unlock(&data_sinfo->lock);
3663
3664                 /* commit the current transaction and try again */
3665 commit_trans:
3666                 if (!committed &&
3667                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3668                         committed = 1;
3669
3670                         trans = btrfs_join_transaction(root);
3671                         if (IS_ERR(trans))
3672                                 return PTR_ERR(trans);
3673                         ret = btrfs_commit_transaction(trans, root);
3674                         if (ret)
3675                                 return ret;
3676                         goto again;
3677                 }
3678
3679                 trace_btrfs_space_reservation(root->fs_info,
3680                                               "space_info:enospc",
3681                                               data_sinfo->flags, bytes, 1);
3682                 return -ENOSPC;
3683         }
3684         data_sinfo->bytes_may_use += bytes;
3685         trace_btrfs_space_reservation(root->fs_info, "space_info",
3686                                       data_sinfo->flags, bytes, 1);
3687         spin_unlock(&data_sinfo->lock);
3688
3689         return 0;
3690 }
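     /*
      * Usual pairing, sketched (cache_save_setup() above does exactly
      * this around its preallocation): reserve before writing, release
      * with btrfs_free_reserved_data_space() if the write won't happen:
      *
      *	ret = btrfs_check_data_free_space(inode, num_bytes);
      *	if (ret)
      *		return ret;
      *	...
      *	btrfs_free_reserved_data_space(inode, num_bytes);
      */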
3691
3692 /*
3693  * Called if we need to clear a data reservation for this inode.
3694  */
3695 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3696 {
3697         struct btrfs_root *root = BTRFS_I(inode)->root;
3698         struct btrfs_space_info *data_sinfo;
3699
3700         /* make sure bytes are sectorsize aligned */
3701         bytes = ALIGN(bytes, root->sectorsize);
3702
3703         data_sinfo = root->fs_info->data_sinfo;
3704         spin_lock(&data_sinfo->lock);
3705         WARN_ON(data_sinfo->bytes_may_use < bytes);
3706         data_sinfo->bytes_may_use -= bytes;
3707         trace_btrfs_space_reservation(root->fs_info, "space_info",
3708                                       data_sinfo->flags, bytes, 0);
3709         spin_unlock(&data_sinfo->lock);
3710 }
3711
3712 static void force_metadata_allocation(struct btrfs_fs_info *info)
3713 {
3714         struct list_head *head = &info->space_info;
3715         struct btrfs_space_info *found;
3716
3717         rcu_read_lock();
3718         list_for_each_entry_rcu(found, head, list) {
3719                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3720                         found->force_alloc = CHUNK_ALLOC_FORCE;
3721         }
3722         rcu_read_unlock();
3723 }
3724
3725 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3726 {
3727         return (global->size << 1);
3728 }
3729
3730 static int should_alloc_chunk(struct btrfs_root *root,
3731                               struct btrfs_space_info *sinfo, int force)
3732 {
3733         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3734         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3735         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3736         u64 thresh;
3737
3738         if (force == CHUNK_ALLOC_FORCE)
3739                 return 1;
3740
3741         /*
3742          * We need to take into account the global rsv because for all intents
3743          * and purposes it's used space.  Don't worry about locking the
3744          * global_rsv, it doesn't change except when the transaction commits.
3745          */
3746         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3747                 num_allocated += calc_global_rsv_need_space(global_rsv);
3748
3749         /*
3750          * in limited mode, we want to have some free space up to
3751          * about 1% of the FS size.
3752          */
3753         if (force == CHUNK_ALLOC_LIMITED) {
3754                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3755                 thresh = max_t(u64, 64 * 1024 * 1024,
3756                                div_factor_fine(thresh, 1));
3757
3758                 if (num_bytes - num_allocated < thresh)
3759                         return 1;
3760         }
3761
3762         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3763                 return 0;
3764         return 1;
3765 }
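     /*
      * CHUNK_ALLOC_LIMITED example: on a 500GiB filesystem,
      * div_factor_fine(thresh, 1) is 1% = 5GiB, which wins over the
      * 64MiB floor, so a new chunk is allowed once this space_info has
      * less than 5GiB of unused space left.
      */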
3766
3767 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3768 {
3769         u64 num_dev;
3770
3771         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3772                     BTRFS_BLOCK_GROUP_RAID0 |
3773                     BTRFS_BLOCK_GROUP_RAID5 |
3774                     BTRFS_BLOCK_GROUP_RAID6))
3775                 num_dev = root->fs_info->fs_devices->rw_devices;
3776         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3777                 num_dev = 2;
3778         else
3779                 num_dev = 1;    /* DUP or single */
3780
3781         /* metadata for updating devices and the chunk tree */
3782         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3783 }
3784
3785 static void check_system_chunk(struct btrfs_trans_handle *trans,
3786                                struct btrfs_root *root, u64 type)
3787 {
3788         struct btrfs_space_info *info;
3789         u64 left;
3790         u64 thresh;
3791
3792         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3793         spin_lock(&info->lock);
3794         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3795                 info->bytes_reserved - info->bytes_readonly;
3796         spin_unlock(&info->lock);
3797
3798         thresh = get_system_chunk_thresh(root, type);
3799         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3800                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3801                         left, thresh, type);
3802                 dump_space_info(info, 0, 0);
3803         }
3804
3805         if (left < thresh) {
3806                 u64 flags;
3807
3808                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3809                 btrfs_alloc_chunk(trans, root, flags);
3810         }
3811 }
3812
3813 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3814                           struct btrfs_root *extent_root, u64 flags, int force)
3815 {
3816         struct btrfs_space_info *space_info;
3817         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3818         int wait_for_alloc = 0;
3819         int ret = 0;
3820
3821         /* Don't re-enter if we're already allocating a chunk */
3822         if (trans->allocating_chunk)
3823                 return -ENOSPC;
3824
3825         space_info = __find_space_info(extent_root->fs_info, flags);
3826         if (!space_info) {
3827                 ret = update_space_info(extent_root->fs_info, flags,
3828                                         0, 0, &space_info);
3829                 BUG_ON(ret); /* -ENOMEM */
3830         }
3831         BUG_ON(!space_info); /* Logic error */
3832
3833 again:
3834         spin_lock(&space_info->lock);
3835         if (force < space_info->force_alloc)
3836                 force = space_info->force_alloc;
3837         if (space_info->full) {
3838                 if (should_alloc_chunk(extent_root, space_info, force))
3839                         ret = -ENOSPC;
3840                 else
3841                         ret = 0;
3842                 spin_unlock(&space_info->lock);
3843                 return ret;
3844         }
3845
3846         if (!should_alloc_chunk(extent_root, space_info, force)) {
3847                 spin_unlock(&space_info->lock);
3848                 return 0;
3849         } else if (space_info->chunk_alloc) {
3850                 wait_for_alloc = 1;
3851         } else {
3852                 space_info->chunk_alloc = 1;
3853         }
3854
3855         spin_unlock(&space_info->lock);
3856
3857         mutex_lock(&fs_info->chunk_mutex);
3858
3859         /*
3860          * The chunk_mutex is held throughout the entirety of a chunk
3861          * allocation, so once we've acquired the chunk_mutex we know that the
3862          * other guy is done and we need to recheck and see if we should
3863          * allocate.
3864          */
3865         if (wait_for_alloc) {
3866                 mutex_unlock(&fs_info->chunk_mutex);
3867                 wait_for_alloc = 0;
3868                 goto again;
3869         }
3870
3871         trans->allocating_chunk = true;
3872
3873         /*
3874          * If we have mixed data/metadata chunks we want to make sure we keep
3875          * allocating mixed chunks instead of individual chunks.
3876          */
3877         if (btrfs_mixed_space_info(space_info))
3878                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3879
3880         /*
3881          * if we're doing a data chunk, go ahead and make sure that
3882          * we keep a reasonable number of metadata chunks allocated in the
3883          * FS as well.
3884          */
3885         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3886                 fs_info->data_chunk_allocations++;
3887                 if (!(fs_info->data_chunk_allocations %
3888                       fs_info->metadata_ratio))
3889                         force_metadata_allocation(fs_info);
3890         }
3891
3892         /*
3893          * Check if we have enough space in SYSTEM chunk because we may need
3894          * to update devices.
3895          */
3896         check_system_chunk(trans, extent_root, flags);
3897
3898         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3899         trans->allocating_chunk = false;
3900
3901         spin_lock(&space_info->lock);
3902         if (ret < 0 && ret != -ENOSPC)
3903                 goto out;
3904         if (ret)
3905                 space_info->full = 1;
3906         else
3907                 ret = 1;
3908
3909         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3910 out:
3911         space_info->chunk_alloc = 0;
3912         spin_unlock(&space_info->lock);
3913         mutex_unlock(&fs_info->chunk_mutex);
3914         return ret;
3915 }
3916
3917 static int can_overcommit(struct btrfs_root *root,
3918                           struct btrfs_space_info *space_info, u64 bytes,
3919                           enum btrfs_reserve_flush_enum flush)
3920 {
3921         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3922         u64 profile = btrfs_get_alloc_profile(root, 0);
3923         u64 space_size;
3924         u64 avail;
3925         u64 used;
3926
3927         used = space_info->bytes_used + space_info->bytes_reserved +
3928                 space_info->bytes_pinned + space_info->bytes_readonly;
3929
3930         /*
3931          * We only want to allow over committing if we have lots of actual space
3932          * free, but if we don't have enough space to handle the global reserve
3933          * space then we could end up having a real enospc problem when trying
3934          * to allocate a chunk or some other such important allocation.
3935          */
3936         spin_lock(&global_rsv->lock);
3937         space_size = calc_global_rsv_need_space(global_rsv);
3938         spin_unlock(&global_rsv->lock);
3939         if (used + space_size >= space_info->total_bytes)
3940                 return 0;
3941
3942         used += space_info->bytes_may_use;
3943
3944         spin_lock(&root->fs_info->free_chunk_lock);
3945         avail = root->fs_info->free_chunk_space;
3946         spin_unlock(&root->fs_info->free_chunk_lock);
3947
3948         /*
3949          * If we have dup, raid1 or raid10 then only half of the free
3950          * space is actually usable.  For raid56, the space info used
3951          * doesn't include the parity drive, so we don't have to
3952          * change the math
3953          */
3954         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3955                        BTRFS_BLOCK_GROUP_RAID1 |
3956                        BTRFS_BLOCK_GROUP_RAID10))
3957                 avail >>= 1;
3958
3959         /*
3960          * If we aren't flushing all things, let us overcommit up to
3961          * half of the space. If we can flush, don't let us overcommit
3962          * too much, let it overcommit up to 1/8 of the space.
3963          */
3964         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3965                 avail >>= 3;
3966         else
3967                 avail >>= 1;
3968
3969         if (used + bytes < space_info->total_bytes + avail)
3970                 return 1;
3971         return 0;
3972 }
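     /*
      * Walk-through with made-up numbers: total_bytes 10GiB, 6GiB used,
      * 8GiB of free chunk space on a RAID1 profile.  The RAID1 halving
      * leaves avail = 4GiB, and BTRFS_RESERVE_FLUSH_ALL cuts that to 1/8
      * (512MiB), so reservations keep succeeding only while used + bytes
      * stays under 10.5GiB.
      */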
3973
3974 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3975                                          unsigned long nr_pages, int nr_items)
3976 {
3977         struct super_block *sb = root->fs_info->sb;
3978
3979         if (down_read_trylock(&sb->s_umount)) {
3980                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3981                 up_read(&sb->s_umount);
3982         } else {
3983                 /*
3984                  * We needn't worry about the filesystem going from r/w to r/o
3985                  * even though we don't acquire the ->s_umount mutex, because
3986                  * the filesystem should guarantee that the delalloc inode list
3987                  * is empty after the filesystem becomes read-only (all dirty
3988                  * pages are written to disk).
3989                  */
3990                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
3991                 if (!current->journal_info)
3992                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
3993         }
3994 }
3995
3996 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
3997 {
3998         u64 bytes;
3999         int nr;
4000
4001         bytes = btrfs_calc_trans_metadata_size(root, 1);
4002         nr = (int)div64_u64(to_reclaim, bytes);
4003         if (!nr)
4004                 nr = 1;
4005         return nr;
4006 }
4007
4008 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
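     /*
      * Rough sizing (the per-item reservation size is an assumption that
      * depends on node size): if one item's metadata reservation is a few
      * hundred KiB, reclaiming 1MiB maps to a handful of items, and
      * shrink_delalloc() below will then try to flush
      * items * EXTENT_SIZE_PER_ITEM bytes of delalloc for it.
      */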
4009
4010 /*
4011  * shrink metadata reservation for delalloc
4012  */
4013 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4014                             bool wait_ordered)
4015 {
4016         struct btrfs_block_rsv *block_rsv;
4017         struct btrfs_space_info *space_info;
4018         struct btrfs_trans_handle *trans;
4019         u64 delalloc_bytes;
4020         u64 max_reclaim;
4021         long time_left;
4022         unsigned long nr_pages;
4023         int loops;
4024         int items;
4025         enum btrfs_reserve_flush_enum flush;
4026
4027         /* Calc the number of items we need to flush for this space reservation */
4028         items = calc_reclaim_items_nr(root, to_reclaim);
4029         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4030
4031         trans = (struct btrfs_trans_handle *)current->journal_info;
4032         block_rsv = &root->fs_info->delalloc_block_rsv;
4033         space_info = block_rsv->space_info;
4034
4035         delalloc_bytes = percpu_counter_sum_positive(
4036                                                 &root->fs_info->delalloc_bytes);
4037         if (delalloc_bytes == 0) {
4038                 if (trans)
4039                         return;
4040                 if (wait_ordered)
4041                         btrfs_wait_ordered_roots(root->fs_info, items);
4042                 return;
4043         }
4044
4045         loops = 0;
4046         while (delalloc_bytes && loops < 3) {
4047                 max_reclaim = min(delalloc_bytes, to_reclaim);
4048                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4049                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4050                 /*
4051                  * We need to wait for the async pages to actually start before
4052                  * we do anything.
4053                  */
4054                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4055                 if (!max_reclaim)
4056                         goto skip_async;
4057
4058                 if (max_reclaim <= nr_pages)
4059                         max_reclaim = 0;
4060                 else
4061                         max_reclaim -= nr_pages;
4062
4063                 wait_event(root->fs_info->async_submit_wait,
4064                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4065                            (int)max_reclaim);
4066 skip_async:
4067                 if (!trans)
4068                         flush = BTRFS_RESERVE_FLUSH_ALL;
4069                 else
4070                         flush = BTRFS_RESERVE_NO_FLUSH;
4071                 spin_lock(&space_info->lock);
4072                 if (can_overcommit(root, space_info, orig, flush)) {
4073                         spin_unlock(&space_info->lock);
4074                         break;
4075                 }
4076                 spin_unlock(&space_info->lock);
4077
4078                 loops++;
4079                 if (wait_ordered && !trans) {
4080                         btrfs_wait_ordered_roots(root->fs_info, items);
4081                 } else {
4082                         time_left = schedule_timeout_killable(1);
4083                         if (time_left)
4084                                 break;
4085                 }
4086                 delalloc_bytes = percpu_counter_sum_positive(
4087                                                 &root->fs_info->delalloc_bytes);
4088         }
4089 }
4090
4091 /**
4092  * maybe_commit_transaction - possibly commit the transaction if its ok to
4093  * @root - the root we're allocating for
4094  * @bytes - the number of bytes we want to reserve
4095  * @force - force the commit
4096  *
4097  * This will check to make sure that committing the transaction will actually
4098  * get us somewhere and then commit the transaction if it does.  Otherwise it
4099  * will return -ENOSPC.
4100  */
4101 static int may_commit_transaction(struct btrfs_root *root,
4102                                   struct btrfs_space_info *space_info,
4103                                   u64 bytes, int force)
4104 {
4105         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4106         struct btrfs_trans_handle *trans;
4107
4108         trans = (struct btrfs_trans_handle *)current->journal_info;
4109         if (trans)
4110                 return -EAGAIN;
4111
4112         if (force)
4113                 goto commit;
4114
4115         /* See if there is enough pinned space to make this reservation */
4116         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4117                                    bytes) >= 0)
4118                 goto commit;
4119
4120         /*
4121          * See if there is some space in the delayed insertion reservation for
4122          * this reservation.
4123          */
4124         if (space_info != delayed_rsv->space_info)
4125                 return -ENOSPC;
4126
4127         spin_lock(&delayed_rsv->lock);
4128         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4129                                    bytes - delayed_rsv->size) >= 0) {
4130                 spin_unlock(&delayed_rsv->lock);
4131                 return -ENOSPC;
4132         }
4133         spin_unlock(&delayed_rsv->lock);
4134
4135 commit:
4136         trans = btrfs_join_transaction(root);
4137         if (IS_ERR(trans))
4138                 return -ENOSPC;
4139
4140         return btrfs_commit_transaction(trans, root);
4141 }
4142
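/*
 * Worked example (made-up figures): with 8MB accounted in
 * space_info->total_bytes_pinned and a 2MB reservation request, the
 * first percpu_counter_compare() above sees 8MB >= 2MB and we go ahead
 * with the commit, since committing should unpin at least as much space
 * as we are asking for.
 */
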
enum flush_state {
	FLUSH_DELAYED_ITEMS_NR	=	1,
	FLUSH_DELAYED_ITEMS	=	2,
	FLUSH_DELALLOC		=	3,
	FLUSH_DELALLOC_WAIT	=	4,
	ALLOC_CHUNK		=	5,
	COMMIT_TRANS		=	6,
};

static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes * 2, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	return ret;
}
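
/*
 * Minimal sketch, for illustration only (the example_ helper is
 * hypothetical and not called anywhere): reserve_metadata_bytes() below
 * walks the flush states in ascending order, retrying its reservation
 * between steps.  Stripped of that retry logic, the escalation looks
 * like this.
 */
static int __maybe_unused example_flush_escalation(struct btrfs_root *root,
					struct btrfs_space_info *space_info,
					u64 num_bytes)
{
	int state;
	int ret = 0;

	/* Try progressively heavier reclaim, cheapest state first. */
	for (state = FLUSH_DELAYED_ITEMS_NR; state <= COMMIT_TRANS; state++) {
		ret = flush_space(root, space_info, num_bytes, num_bytes,
				  state);
		if (ret)
			break;
		/* The real caller retries its reservation here. */
	}
	return ret;
}
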
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;
	int ret = 0;
	bool flushing = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush all things.
	 */
	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
	       space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_killable(space_info->wait, !space_info->flush);
		/* Must have been killed, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the space
	 * info we can go ahead and save our reservation first and then start
	 * flushing if we need to.  Otherwise, if we've already overcommitted,
	 * let's start flushing stuff first and then come back and try to make
	 * our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			trace_btrfs_space_reservation(root->fs_info,
				"space_info", space_info->flags, orig_bytes, 1);
			ret = 0;
		} else {
			/*
			 * Ok, set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok, we're overcommitted, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * 2);
	}

	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 *
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	}

	spin_unlock(&space_info->lock);

	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		goto out;

	ret = flush_space(root, space_info, num_bytes, orig_bytes,
			  flush_state);
	flush_state++;

	/*
	 * If we are BTRFS_RESERVE_FLUSH_LIMIT we cannot flush delalloc, or a
	 * deadlock would happen, so skip the delalloc flush states.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
	    (flush_state == FLUSH_DELALLOC ||
	     flush_state == FLUSH_DELALLOC_WAIT))
		flush_state = ALLOC_CHUNK;

	if (!ret)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
		 flush_state < COMMIT_TRANS)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		 flush_state <= COMMIT_TRANS)
		goto again;

out:
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      space_info->flags, orig_bytes, 1);
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}

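/*
 * Worked example for the num_bytes choice above (illustrative figures):
 * with total_bytes = 10GB, used = 10.5GB and orig_bytes = 1MB we are
 * already overcommitted, so the flush target becomes the 0.5GB of
 * overcommit plus twice the request, i.e. num_bytes = 0.5GB + 2MB.
 * When used is still below total_bytes we only flush for orig_bytes
 * itself.
 */
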
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->csum_root && trans->adding_csums)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->uuid_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

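/*
 * Worked example (illustrative figures): div_factor(size, 5) is 50% of
 * size, so a caller passing min_factor = 5 only steals from the global
 * rsv if at least half of it stays reserved.  With size = 512MB and
 * reserved = 300MB, migrating 40MB succeeds (300MB >= 256MB + 40MB),
 * while migrating 50MB is refused (300MB < 256MB + 50MB).
 */
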
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
					space_info->flags, num_bytes, 0);
			spin_unlock(&space_info->lock);
		}
	}
}

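/*
 * Worked example (made-up figures): shrinking an rsv of size 10MB with
 * 10MB reserved by num_bytes = 4MB leaves size = 6MB and an excess of
 * 4MB.  If dest (typically the global rsv) is 1MB short of full, it
 * takes that 1MB, and the remaining 3MB is returned to the space_info
 * by decrementing bytes_may_use.
 */
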
static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}

/*
 * helper to calculate the size of the global block reservation.
 * the desired value is the sum of the space used by the extent tree,
 * the checksum tree and the root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

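/*
 * Worked example (illustrative figures): with 100GB of data, 1GB of
 * metadata, a 4KB block size and 4-byte csums, the csum component is
 * (100GB / 4KB) * 4 * 2 = 200MB and the 2% component adds roughly 2GB.
 * Since 3 * ~2.2GB exceeds the 1GB of metadata actually used, the
 * result is clamped to meta_used / 3, i.e. about 341MB before the final
 * alignment.
 */
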
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from the
 * common file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * differs from the free space reservation, so we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->leafsize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);

	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
}

/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number of
 * reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have as many or more outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

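/*
 * Worked example: suppose the inode had 3 outstanding extents (2 after
 * the decrement above) but 5 reserved extents.  Then dropped_extents =
 * 5 - 2 = 3, reserved_extents becomes 2, and we return 3 (plus one more
 * if the inode-update reservation was also dropped).
 */
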
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *      reserved/free'd for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure out
 * how many checksums will be required.  If we are adding bytes then the number
 * may go up and we will return the number of additional bytes that must be
 * reserved.  If it is going down we will return the number of bytes that must
 * be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}

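/*
 * Worked example (illustrative: assume 100 csums fit in a leaf): growing
 * csum_bytes from 300 sectors' worth to 450 sectors' worth moves the
 * rounded-up leaf count from 3 to 5, so we'd return the metadata cost of
 * 2 more items; shrinking back from 450 to 300 returns the same amount
 * to be freed.
 */
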
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;

	/*
	 * If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit.  We also don't need the delalloc
	 * mutex since we won't race with anybody.  We need this mostly to make
	 * lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
		if (ret)
			goto out_fail;
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, num_bytes +
						nr_extents * root->leafsize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	/*
	 * If the inode's csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inode's csum bytes and carry on.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
		calc_csum_metadata_size(inode, num_bytes, 0);
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * free'd from any free-ers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have free'd more space had we
		 * not had an artificially high ->csum_bytes, so we need to free
		 * the remainder.  If bytes is the same or less then we don't
		 * need to do anything, the other free-ers did the correct
		 * thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on the number of
 *   outstanding extents and how many csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. when there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

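/*
 * Minimal usage sketch, for illustration only (the example_ helper is
 * hypothetical and not part of btrfs): the reserve/release pair above is
 * expected to bracket a buffered write that may fail before any delalloc
 * is actually created.
 */
static int __maybe_unused example_buffered_write_prep(struct inode *inode,
						      u64 len, bool fail)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, len);
	if (ret)
		return ret;

	if (fail) {
		/* Nothing was dirtied, so drop both reservations again. */
		btrfs_delalloc_release_space(inode, len);
		return -EIO;
	}

	/*
	 * On success the metadata half is released later, when the
	 * delalloc IO completes (btrfs_delalloc_release_metadata).
	 */
	return 0;
}
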
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

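/*
 * Worked example for the factor above: in a DUP, RAID1 or RAID10 block
 * group every logical byte occupies two bytes on disk, so allocating 1MB
 * moves bytes_used up by 1MB but disk_used up by 2MB; single and RAID0
 * profiles use factor = 1.
 */
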
5247 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5248 {
5249         struct btrfs_block_group_cache *cache;
5250         u64 bytenr;
5251
5252         spin_lock(&root->fs_info->block_group_cache_lock);
5253         bytenr = root->fs_info->first_logical_byte;
5254         spin_unlock(&root->fs_info->block_group_cache_lock);
5255
5256         if (bytenr < (u64)-1)
5257                 return bytenr;
5258
5259         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5260         if (!cache)
5261                 return 0;
5262
5263         bytenr = cache->key.objectid;
5264         btrfs_put_block_group(cache);
5265
5266         return bytenr;
5267 }
5268
5269 static int pin_down_extent(struct btrfs_root *root,
5270                            struct btrfs_block_group_cache *cache,
5271                            u64 bytenr, u64 num_bytes, int reserved)
5272 {
5273         spin_lock(&cache->space_info->lock);
5274         spin_lock(&cache->lock);
5275         cache->pinned += num_bytes;
5276         cache->space_info->bytes_pinned += num_bytes;
5277         if (reserved) {
5278                 cache->reserved -= num_bytes;
5279                 cache->space_info->bytes_reserved -= num_bytes;
5280         }
5281         spin_unlock(&cache->lock);
5282         spin_unlock(&cache->space_info->lock);
5283
5284         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5285                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5286         if (reserved)
5287                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5288         return 0;
5289 }
5290
5291 /*
5292  * this function must be called within transaction
5293  */
5294 int btrfs_pin_extent(struct btrfs_root *root,
5295                      u64 bytenr, u64 num_bytes, int reserved)
5296 {
5297         struct btrfs_block_group_cache *cache;
5298
5299         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5300         BUG_ON(!cache); /* Logic error */
5301
5302         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5303
5304         btrfs_put_block_group(cache);
5305         return 0;
5306 }
5307
5308 /*
5309  * this function must be called within transaction
5310  */
5311 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5312                                     u64 bytenr, u64 num_bytes)
5313 {
5314         struct btrfs_block_group_cache *cache;
5315         int ret;
5316
5317         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5318         if (!cache)
5319                 return -EINVAL;
5320
5321         /*
5322          * pull in the free space cache (if any) so that our pin
5323          * removes the free space from the cache.  We have load_only set
5324          * to one because the slow code to read in the free extents does check
5325          * the pinned extents.
5326          */
5327         cache_block_group(cache, 1);
5328
5329         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5330
5331         /* remove us from the free space cache (if we're there at all) */
5332         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5333         btrfs_put_block_group(cache);
5334         return ret;
5335 }
5336
5337 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5338 {
5339         int ret;
5340         struct btrfs_block_group_cache *block_group;
5341         struct btrfs_caching_control *caching_ctl;
5342
5343         block_group = btrfs_lookup_block_group(root->fs_info, start);
5344         if (!block_group)
5345                 return -EINVAL;
5346
5347         cache_block_group(block_group, 0);
5348         caching_ctl = get_caching_control(block_group);
5349
5350         if (!caching_ctl) {
5351                 /* Logic error */
5352                 BUG_ON(!block_group_cache_done(block_group));
5353                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5354         } else {
5355                 mutex_lock(&caching_ctl->mutex);
5356
5357                 if (start >= caching_ctl->progress) {
5358                         ret = add_excluded_extent(root, start, num_bytes);
5359                 } else if (start + num_bytes <= caching_ctl->progress) {
5360                         ret = btrfs_remove_free_space(block_group,
5361                                                       start, num_bytes);
5362                 } else {
5363                         num_bytes = caching_ctl->progress - start;
5364                         ret = btrfs_remove_free_space(block_group,
5365                                                       start, num_bytes);
5366                         if (ret)
5367                                 goto out_lock;
5368
5369                         num_bytes = (start + num_bytes) -
5370                                 caching_ctl->progress;
5371                         start = caching_ctl->progress;
5372                         ret = add_excluded_extent(root, start, num_bytes);
5373                 }
5374 out_lock:
5375                 mutex_unlock(&caching_ctl->mutex);
5376                 put_caching_control(caching_ctl);
5377         }
5378         btrfs_put_block_group(block_group);
5379         return ret;
5380 }
5381
5382 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5383                                  struct extent_buffer *eb)
5384 {
5385         struct btrfs_file_extent_item *item;
5386         struct btrfs_key key;
5387         int found_type;
5388         int i;
5389
5390         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5391                 return 0;
5392
5393         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5394                 btrfs_item_key_to_cpu(eb, &key, i);
5395                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5396                         continue;
5397                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5398                 found_type = btrfs_file_extent_type(eb, item);
5399                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5400                         continue;
5401                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5402                         continue;
5403                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5404                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5405                 __exclude_logged_extent(log, key.objectid, key.offset);
5406         }
5407
5408         return 0;
5409 }
5410
5411 /**
5412  * btrfs_update_reserved_bytes - update the block_group and space info counters
5413  * @cache:      The cache we are manipulating
5414  * @num_bytes:  The number of bytes in question
5415  * @reserve:    One of the reservation enums
5416  *
5417  * This is called by the allocator when it reserves space, or by somebody who is
5418  * freeing space that was never actually used on disk.  For example, if you
5419  * reserve space for a new leaf in transaction A and free that leaf again
5420  * before transaction A commits, you call this with reserve set to
5421  * RESERVE_FREE in order to clear the reservation.
5422  *
5423  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5424  * ENOSPC accounting.  For data we handle the reservation through clearing the
5425  * delalloc bits in the io_tree.  We have to do this since we could end up
5426  * allocating less disk space for the amount of data we have reserved in the
5427  * case of compression.
5428  *
5429  * If this is a reservation and the block group has become read-only, we
5430  * cannot make the reservation and return -EAGAIN; otherwise this function
5431  * always succeeds.
5432  */
5433 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5434                                        u64 num_bytes, int reserve)
5435 {
5436         struct btrfs_space_info *space_info = cache->space_info;
5437         int ret = 0;
5438
5439         spin_lock(&space_info->lock);
5440         spin_lock(&cache->lock);
5441         if (reserve != RESERVE_FREE) {
5442                 if (cache->ro) {
5443                         ret = -EAGAIN;
5444                 } else {
5445                         cache->reserved += num_bytes;
5446                         space_info->bytes_reserved += num_bytes;
5447                         if (reserve == RESERVE_ALLOC) {
5448                                 trace_btrfs_space_reservation(cache->fs_info,
5449                                                 "space_info", space_info->flags,
5450                                                 num_bytes, 0);
5451                                 space_info->bytes_may_use -= num_bytes;
5452                         }
5453                 }
5454         } else {
5455                 if (cache->ro)
5456                         space_info->bytes_readonly += num_bytes;
5457                 cache->reserved -= num_bytes;
5458                 space_info->bytes_reserved -= num_bytes;
5459         }
5460         spin_unlock(&cache->lock);
5461         spin_unlock(&space_info->lock);
5462         return ret;
5463 }
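
/*
 * Worked example (hypothetical sizes): reserving a 16K metadata extent
 * with RESERVE_ALLOC moves 16K from space_info->bytes_may_use to
 * bytes_reserved (and bumps cache->reserved); freeing that same
 * never-written extent later with RESERVE_FREE drops the 16K from
 * bytes_reserved again, and also credits it to bytes_readonly if the
 * block group has gone read-only in the meantime.
 */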
5464
5465 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5466                                 struct btrfs_root *root)
5467 {
5468         struct btrfs_fs_info *fs_info = root->fs_info;
5469         struct btrfs_caching_control *next;
5470         struct btrfs_caching_control *caching_ctl;
5471         struct btrfs_block_group_cache *cache;
5472         struct btrfs_space_info *space_info;
5473
5474         down_write(&fs_info->commit_root_sem);
5475
5476         list_for_each_entry_safe(caching_ctl, next,
5477                                  &fs_info->caching_block_groups, list) {
5478                 cache = caching_ctl->block_group;
5479                 if (block_group_cache_done(cache)) {
5480                         cache->last_byte_to_unpin = (u64)-1;
5481                         list_del_init(&caching_ctl->list);
5482                         put_caching_control(caching_ctl);
5483                 } else {
5484                         cache->last_byte_to_unpin = caching_ctl->progress;
5485                 }
5486         }
5487
5488         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5489                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5490         else
5491                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5492
5493         up_write(&fs_info->commit_root_sem);
5494
5495         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5496                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5497
5498         update_global_block_rsv(fs_info);
5499 }
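
/*
 * Note on the freed_extents[] flip above: pinned extents are tracked in
 * two io_trees used double-buffer style.  The commit unpins the ranges
 * accumulated in the old tree (see btrfs_finish_extent_commit() below)
 * while new frees land in the other tree, so nothing is lost in between.
 */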
5500
5501 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5502 {
5503         struct btrfs_fs_info *fs_info = root->fs_info;
5504         struct btrfs_block_group_cache *cache = NULL;
5505         struct btrfs_space_info *space_info;
5506         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5507         u64 len;
5508         bool readonly;
5509
5510         while (start <= end) {
5511                 readonly = false;
5512                 if (!cache ||
5513                     start >= cache->key.objectid + cache->key.offset) {
5514                         if (cache)
5515                                 btrfs_put_block_group(cache);
5516                         cache = btrfs_lookup_block_group(fs_info, start);
5517                         BUG_ON(!cache); /* Logic error */
5518                 }
5519
5520                 len = cache->key.objectid + cache->key.offset - start;
5521                 len = min(len, end + 1 - start);
5522
5523                 if (start < cache->last_byte_to_unpin) {
5524                         len = min(len, cache->last_byte_to_unpin - start);
5525                         btrfs_add_free_space(cache, start, len);
5526                 }
5527
5528                 start += len;
5529                 space_info = cache->space_info;
5530
5531                 spin_lock(&space_info->lock);
5532                 spin_lock(&cache->lock);
5533                 cache->pinned -= len;
5534                 space_info->bytes_pinned -= len;
5535                 if (cache->ro) {
5536                         space_info->bytes_readonly += len;
5537                         readonly = true;
5538                 }
5539                 spin_unlock(&cache->lock);
5540                 if (!readonly && global_rsv->space_info == space_info) {
5541                         spin_lock(&global_rsv->lock);
5542                         if (!global_rsv->full) {
5543                                 len = min(len, global_rsv->size -
5544                                           global_rsv->reserved);
5545                                 global_rsv->reserved += len;
5546                                 space_info->bytes_may_use += len;
5547                                 if (global_rsv->reserved >= global_rsv->size)
5548                                         global_rsv->full = 1;
5549                         }
5550                         spin_unlock(&global_rsv->lock);
5551                 }
5552                 spin_unlock(&space_info->lock);
5553         }
5554
5555         if (cache)
5556                 btrfs_put_block_group(cache);
5557         return 0;
5558 }
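
/*
 * Example of the clamping above (made-up numbers): for a block group
 * with last_byte_to_unpin at 1.5G, unpinning [1.4G, 1.6G) returns only
 * [1.4G, 1.5G) to the free space cache on the first pass; the second
 * pass adjusts the pinned counters for [1.5G, 1.6G) but does not add it
 * to the cache, since the caching thread has not scanned past it yet.
 */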
5559
5560 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5561                                struct btrfs_root *root)
5562 {
5563         struct btrfs_fs_info *fs_info = root->fs_info;
5564         struct extent_io_tree *unpin;
5565         u64 start;
5566         u64 end;
5567         int ret;
5568
5569         if (trans->aborted)
5570                 return 0;
5571
5572         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5573                 unpin = &fs_info->freed_extents[1];
5574         else
5575                 unpin = &fs_info->freed_extents[0];
5576
5577         while (1) {
5578                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5579                                             EXTENT_DIRTY, NULL);
5580                 if (ret)
5581                         break;
5582
5583                 if (btrfs_test_opt(root, DISCARD))
5584                         ret = btrfs_discard_extent(root, start,
5585                                                    end + 1 - start, NULL);
5586
5587                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5588                 unpin_extent_range(root, start, end);
5589                 cond_resched();
5590         }
5591
5592         return 0;
5593 }
5594
5595 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5596                              u64 owner, u64 root_objectid)
5597 {
5598         struct btrfs_space_info *space_info;
5599         u64 flags;
5600
5601         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5602                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5603                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5604                 else
5605                         flags = BTRFS_BLOCK_GROUP_METADATA;
5606         } else {
5607                 flags = BTRFS_BLOCK_GROUP_DATA;
5608         }
5609
5610         space_info = __find_space_info(fs_info, flags);
5611         BUG_ON(!space_info); /* Logic bug */
5612         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5613 }
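
/*
 * Mapping used above, for illustration: tree blocks pass their level
 * (always < BTRFS_FIRST_FREE_OBJECTID) as owner and count against
 * METADATA, or SYSTEM when they belong to the chunk tree; extents owned
 * by an inode number count against DATA.
 */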
5614
5616 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5617                                 struct btrfs_root *root,
5618                                 u64 bytenr, u64 num_bytes, u64 parent,
5619                                 u64 root_objectid, u64 owner_objectid,
5620                                 u64 owner_offset, int refs_to_drop,
5621                                 struct btrfs_delayed_extent_op *extent_op)
5622 {
5623         struct btrfs_key key;
5624         struct btrfs_path *path;
5625         struct btrfs_fs_info *info = root->fs_info;
5626         struct btrfs_root *extent_root = info->extent_root;
5627         struct extent_buffer *leaf;
5628         struct btrfs_extent_item *ei;
5629         struct btrfs_extent_inline_ref *iref;
5630         int ret;
5631         int is_data;
5632         int extent_slot = 0;
5633         int found_extent = 0;
5634         int num_to_del = 1;
5635         u32 item_size;
5636         u64 refs;
5637         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5638                                                  SKINNY_METADATA);
5639
5640         path = btrfs_alloc_path();
5641         if (!path)
5642                 return -ENOMEM;
5643
5644         path->reada = 1;
5645         path->leave_spinning = 1;
5646
5647         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5648         BUG_ON(!is_data && refs_to_drop != 1);
5649
5650         if (is_data)
5651                 skinny_metadata = 0;
5652
5653         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5654                                     bytenr, num_bytes, parent,
5655                                     root_objectid, owner_objectid,
5656                                     owner_offset);
5657         if (ret == 0) {
5658                 extent_slot = path->slots[0];
5659                 while (extent_slot >= 0) {
5660                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5661                                               extent_slot);
5662                         if (key.objectid != bytenr)
5663                                 break;
5664                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5665                             key.offset == num_bytes) {
5666                                 found_extent = 1;
5667                                 break;
5668                         }
5669                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5670                             key.offset == owner_objectid) {
5671                                 found_extent = 1;
5672                                 break;
5673                         }
5674                         if (path->slots[0] - extent_slot > 5)
5675                                 break;
5676                         extent_slot--;
5677                 }
5678 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5679                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5680                 if (found_extent && item_size < sizeof(*ei))
5681                         found_extent = 0;
5682 #endif
5683                 if (!found_extent) {
5684                         BUG_ON(iref);
5685                         ret = remove_extent_backref(trans, extent_root, path,
5686                                                     NULL, refs_to_drop,
5687                                                     is_data);
5688                         if (ret) {
5689                                 btrfs_abort_transaction(trans, extent_root, ret);
5690                                 goto out;
5691                         }
5692                         btrfs_release_path(path);
5693                         path->leave_spinning = 1;
5694
5695                         key.objectid = bytenr;
5696                         key.type = BTRFS_EXTENT_ITEM_KEY;
5697                         key.offset = num_bytes;
5698
5699                         if (!is_data && skinny_metadata) {
5700                                 key.type = BTRFS_METADATA_ITEM_KEY;
5701                                 key.offset = owner_objectid;
5702                         }
5703
5704                         ret = btrfs_search_slot(trans, extent_root,
5705                                                 &key, path, -1, 1);
5706                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5707                                 /*
5708                                  * Couldn't find our skinny metadata item,
5709                                  * see if we have ye olde extent item.
5710                                  */
5711                                 path->slots[0]--;
5712                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5713                                                       path->slots[0]);
5714                                 if (key.objectid == bytenr &&
5715                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5716                                     key.offset == num_bytes)
5717                                         ret = 0;
5718                         }
5719
5720                         if (ret > 0 && skinny_metadata) {
5721                                 skinny_metadata = false;
5722                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5723                                 key.offset = num_bytes;
5724                                 btrfs_release_path(path);
5725                                 ret = btrfs_search_slot(trans, extent_root,
5726                                                         &key, path, -1, 1);
5727                         }
5728
5729                         if (ret) {
5730                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5731                                         ret, bytenr);
5732                                 if (ret > 0)
5733                                         btrfs_print_leaf(extent_root,
5734                                                          path->nodes[0]);
5735                         }
5736                         if (ret < 0) {
5737                                 btrfs_abort_transaction(trans, extent_root, ret);
5738                                 goto out;
5739                         }
5740                         extent_slot = path->slots[0];
5741                 }
5742         } else if (WARN_ON(ret == -ENOENT)) {
5743                 btrfs_print_leaf(extent_root, path->nodes[0]);
5744                 btrfs_err(info,
5745                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5746                         bytenr, parent, root_objectid, owner_objectid,
5747                         owner_offset);
5748         } else {
5749                 btrfs_abort_transaction(trans, extent_root, ret);
5750                 goto out;
5751         }
5752
5753         leaf = path->nodes[0];
5754         item_size = btrfs_item_size_nr(leaf, extent_slot);
5755 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5756         if (item_size < sizeof(*ei)) {
5757                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5758                 ret = convert_extent_item_v0(trans, extent_root, path,
5759                                              owner_objectid, 0);
5760                 if (ret < 0) {
5761                         btrfs_abort_transaction(trans, extent_root, ret);
5762                         goto out;
5763                 }
5764
5765                 btrfs_release_path(path);
5766                 path->leave_spinning = 1;
5767
5768                 key.objectid = bytenr;
5769                 key.type = BTRFS_EXTENT_ITEM_KEY;
5770                 key.offset = num_bytes;
5771
5772                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5773                                         -1, 1);
5774                 if (ret) {
5775                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5776                                 ret, bytenr);
5777                         btrfs_print_leaf(extent_root, path->nodes[0]);
5778                 }
5779                 if (ret < 0) {
5780                         btrfs_abort_transaction(trans, extent_root, ret);
5781                         goto out;
5782                 }
5783
5784                 extent_slot = path->slots[0];
5785                 leaf = path->nodes[0];
5786                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5787         }
5788 #endif
5789         BUG_ON(item_size < sizeof(*ei));
5790         ei = btrfs_item_ptr(leaf, extent_slot,
5791                             struct btrfs_extent_item);
5792         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5793             key.type == BTRFS_EXTENT_ITEM_KEY) {
5794                 struct btrfs_tree_block_info *bi;
5795                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5796                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5797                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5798         }
5799
5800         refs = btrfs_extent_refs(leaf, ei);
5801         if (refs < refs_to_drop) {
5802                 btrfs_err(info, "trying to drop %d refs but we only have %llu for bytenr %llu",
5803                           refs_to_drop, refs, bytenr);
5804                 ret = -EINVAL;
5805                 btrfs_abort_transaction(trans, extent_root, ret);
5806                 goto out;
5807         }
5808         refs -= refs_to_drop;
5809
5810         if (refs > 0) {
5811                 if (extent_op)
5812                         __run_delayed_extent_op(extent_op, leaf, ei);
5813                 /*
5814                  * In the case of inline back ref, reference count will
5815                  * be updated by remove_extent_backref
5816                  */
5817                 if (iref) {
5818                         BUG_ON(!found_extent);
5819                 } else {
5820                         btrfs_set_extent_refs(leaf, ei, refs);
5821                         btrfs_mark_buffer_dirty(leaf);
5822                 }
5823                 if (found_extent) {
5824                         ret = remove_extent_backref(trans, extent_root, path,
5825                                                     iref, refs_to_drop,
5826                                                     is_data);
5827                         if (ret) {
5828                                 btrfs_abort_transaction(trans, extent_root, ret);
5829                                 goto out;
5830                         }
5831                 }
5832                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5833                                  root_objectid);
5834         } else {
5835                 if (found_extent) {
5836                         BUG_ON(is_data && refs_to_drop !=
5837                                extent_data_ref_count(root, path, iref));
5838                         if (iref) {
5839                                 BUG_ON(path->slots[0] != extent_slot);
5840                         } else {
5841                                 BUG_ON(path->slots[0] != extent_slot + 1);
5842                                 path->slots[0] = extent_slot;
5843                                 num_to_del = 2;
5844                         }
5845                 }
5846
5847                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5848                                       num_to_del);
5849                 if (ret) {
5850                         btrfs_abort_transaction(trans, extent_root, ret);
5851                         goto out;
5852                 }
5853                 btrfs_release_path(path);
5854
5855                 if (is_data) {
5856                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5857                         if (ret) {
5858                                 btrfs_abort_transaction(trans, extent_root, ret);
5859                                 goto out;
5860                         }
5861                 }
5862
5863                 ret = update_block_group(root, bytenr, num_bytes, 0);
5864                 if (ret) {
5865                         btrfs_abort_transaction(trans, extent_root, ret);
5866                         goto out;
5867                 }
5868         }
5869 out:
5870         btrfs_free_path(path);
5871         return ret;
5872 }
5873
5874 /*
5875  * when we free a block, it is possible (and likely) that we free the last
5876  * delayed ref for that extent as well.  This searches the delayed ref tree for
5877  * a given extent, and if there are no other delayed refs to be processed, it
5878  * removes it from the tree.
5879  */
5880 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5881                                       struct btrfs_root *root, u64 bytenr)
5882 {
5883         struct btrfs_delayed_ref_head *head;
5884         struct btrfs_delayed_ref_root *delayed_refs;
5885         int ret = 0;
5886
5887         delayed_refs = &trans->transaction->delayed_refs;
5888         spin_lock(&delayed_refs->lock);
5889         head = btrfs_find_delayed_ref_head(trans, bytenr);
5890         if (!head)
5891                 goto out_delayed_unlock;
5892
5893         spin_lock(&head->lock);
5894         if (rb_first(&head->ref_root))
5895                 goto out;
5896
5897         if (head->extent_op) {
5898                 if (!head->must_insert_reserved)
5899                         goto out;
5900                 btrfs_free_delayed_extent_op(head->extent_op);
5901                 head->extent_op = NULL;
5902         }
5903
5904         /*
5905          * waiting for the lock here would deadlock.  If someone else has it
5906          * locked, they are already in the process of dropping it anyway
5907          */
5908         if (!mutex_trylock(&head->mutex))
5909                 goto out;
5910
5911         /*
5912          * at this point we have a head with no other entries.  Go
5913          * ahead and process it.
5914          */
5915         head->node.in_tree = 0;
5916         rb_erase(&head->href_node, &delayed_refs->href_root);
5917
5918         atomic_dec(&delayed_refs->num_entries);
5919
5920         /*
5921          * we don't take a ref on the node because we're removing it from the
5922          * tree, so we just steal the ref the tree was holding.
5923          */
5924         delayed_refs->num_heads--;
5925         if (head->processing == 0)
5926                 delayed_refs->num_heads_ready--;
5927         head->processing = 0;
5928         spin_unlock(&head->lock);
5929         spin_unlock(&delayed_refs->lock);
5930
5931         BUG_ON(head->extent_op);
5932         if (head->must_insert_reserved)
5933                 ret = 1;
5934
5935         mutex_unlock(&head->mutex);
5936         btrfs_put_delayed_ref(&head->node);
5937         return ret;
5938 out:
5939         spin_unlock(&head->lock);
5940
5941 out_delayed_unlock:
5942         spin_unlock(&delayed_refs->lock);
5943         return 0;
5944 }
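
/*
 * Return convention, as consumed by btrfs_free_tree_block() below: 1
 * means the lone delayed ref head was removed and must_insert_reserved
 * was set, so the caller still owns the reserved space and may free the
 * extent directly; 0 means the delayed ref machinery (or whoever holds
 * the head mutex) remains responsible for it.
 */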
5945
5946 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5947                            struct btrfs_root *root,
5948                            struct extent_buffer *buf,
5949                            u64 parent, int last_ref)
5950 {
5951         struct btrfs_block_group_cache *cache = NULL;
5952         int pin = 1;
5953         int ret;
5954
5955         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5956                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5957                                         buf->start, buf->len,
5958                                         parent, root->root_key.objectid,
5959                                         btrfs_header_level(buf),
5960                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5961                 BUG_ON(ret); /* -ENOMEM */
5962         }
5963
5964         if (!last_ref)
5965                 return;
5966
5967         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5968
5969         if (btrfs_header_generation(buf) == trans->transid) {
5970                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5971                         ret = check_ref_cleanup(trans, root, buf->start);
5972                         if (!ret)
5973                                 goto out;
5974                 }
5975
5976                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5977                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5978                         goto out;
5979                 }
5980
5981                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5982
5983                 btrfs_add_free_space(cache, buf->start, buf->len);
5984                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5985                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
5986                 pin = 0;
5987         }
5988 out:
5989         if (pin)
5990                 add_pinned_bytes(root->fs_info, buf->len,
5991                                  btrfs_header_level(buf),
5992                                  root->root_key.objectid);
5993
5994         /*
5995          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5996          * anymore.
5997          */
5998         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5999         btrfs_put_block_group(cache);
6000 }
6001
6002 /* Can return -ENOMEM */
6003 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6004                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6005                       u64 owner, u64 offset, int for_cow)
6006 {
6007         int ret;
6008         struct btrfs_fs_info *fs_info = root->fs_info;
6009
6010         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6011
6012         /*
6013          * tree log blocks never actually go into the extent allocation
6014          * tree, just update pinning info and exit early.
6015          */
6016         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6017                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6018                 /* unlocks the pinned mutex */
6019                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6020                 ret = 0;
6021         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6022                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6023                                         num_bytes,
6024                                         parent, root_objectid, (int)owner,
6025                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6026         } else {
6027                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6028                                                 num_bytes,
6029                                                 parent, root_objectid, owner,
6030                                                 offset, BTRFS_DROP_DELAYED_REF,
6031                                                 NULL, for_cow);
6032         }
6033         return ret;
6034 }
6035
6036 static u64 stripe_align(struct btrfs_root *root,
6037                         struct btrfs_block_group_cache *cache,
6038                         u64 val, u64 num_bytes)
6039 {
6040         u64 ret = ALIGN(val, root->stripesize);
6041         return ret;
6042 }
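
/*
 * E.g. with a 4K stripesize, stripe_align() rounds an offset of 6000 up
 * to 8192.  The cache and num_bytes arguments are currently unused.
 */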
6043
6044 /*
6045  * when we wait for progress in the block group caching, it's because
6046  * our allocation attempt failed at least once.  So, we must sleep
6047  * and let some progress happen before we try again.
6048  *
6049  * This function will sleep at least once waiting for new free space to
6050  * show up, and then it will check the block group free space numbers
6051  * for our min num_bytes.  Another option is to have it go ahead
6052  * and look in the rbtree for a free extent of a given size, but this
6053  * is a good start.
6054  *
6055  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6056  * any of the information in this block group.
6057  */
6058 static noinline void
6059 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6060                                 u64 num_bytes)
6061 {
6062         struct btrfs_caching_control *caching_ctl;
6063
6064         caching_ctl = get_caching_control(cache);
6065         if (!caching_ctl)
6066                 return;
6067
6068         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6069                    (cache->free_space_ctl->free_space >= num_bytes));
6070
6071         put_caching_control(caching_ctl);
6072 }
6073
6074 static noinline int
6075 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6076 {
6077         struct btrfs_caching_control *caching_ctl;
6078         int ret = 0;
6079
6080         caching_ctl = get_caching_control(cache);
6081         if (!caching_ctl)
6082                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6083
6084         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6085         if (cache->cached == BTRFS_CACHE_ERROR)
6086                 ret = -EIO;
6087         put_caching_control(caching_ctl);
6088         return ret;
6089 }
6090
6091 int __get_raid_index(u64 flags)
6092 {
6093         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6094                 return BTRFS_RAID_RAID10;
6095         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6096                 return BTRFS_RAID_RAID1;
6097         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6098                 return BTRFS_RAID_DUP;
6099         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6100                 return BTRFS_RAID_RAID0;
6101         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6102                 return BTRFS_RAID_RAID5;
6103         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6104                 return BTRFS_RAID_RAID6;
6105
6106         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6107 }
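
/*
 * E.g. flags of BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1
 * map to BTRFS_RAID_RAID1; the profile bits are tested in a fixed order,
 * and no profile bit at all means "single".
 */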
6108
6109 int get_block_group_index(struct btrfs_block_group_cache *cache)
6110 {
6111         return __get_raid_index(cache->flags);
6112 }
6113
6114 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6115         [BTRFS_RAID_RAID10]     = "raid10",
6116         [BTRFS_RAID_RAID1]      = "raid1",
6117         [BTRFS_RAID_DUP]        = "dup",
6118         [BTRFS_RAID_RAID0]      = "raid0",
6119         [BTRFS_RAID_SINGLE]     = "single",
6120         [BTRFS_RAID_RAID5]      = "raid5",
6121         [BTRFS_RAID_RAID6]      = "raid6",
6122 };
6123
6124 static const char *get_raid_name(enum btrfs_raid_types type)
6125 {
6126         if (type >= BTRFS_NR_RAID_TYPES)
6127                 return NULL;
6128
6129         return btrfs_raid_type_names[type];
6130 }
6131
6132 enum btrfs_loop_type {
6133         LOOP_CACHING_NOWAIT = 0,
6134         LOOP_CACHING_WAIT = 1,
6135         LOOP_ALLOC_CHUNK = 2,
6136         LOOP_NO_EMPTY_SIZE = 3,
6137 };
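
/*
 * find_free_extent() escalates through these stages in order, restarting
 * its scan of the block groups at each step; the comment near the end of
 * that function describes what each stage allows.
 */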
6138
6139 /*
6140  * walks the btree of allocated extents and finds a hole of a given size.
6141  * The key ins is changed to record the hole:
6142  * ins->objectid == start position
6143  * ins->type == BTRFS_EXTENT_ITEM_KEY
6144  * ins->offset == the size of the hole.
6145  * Any available blocks before search_start are skipped.
6146  *
6147  * If there is no suitable free space, we record the size of the largest
6148  * free space extent we saw in ins->offset instead.
6149  */
6150 static noinline int find_free_extent(struct btrfs_root *orig_root,
6151                                      u64 num_bytes, u64 empty_size,
6152                                      u64 hint_byte, struct btrfs_key *ins,
6153                                      u64 flags)
6154 {
6155         int ret = 0;
6156         struct btrfs_root *root = orig_root->fs_info->extent_root;
6157         struct btrfs_free_cluster *last_ptr = NULL;
6158         struct btrfs_block_group_cache *block_group = NULL;
6159         u64 search_start = 0;
6160         u64 max_extent_size = 0;
6161         int empty_cluster = 2 * 1024 * 1024;
6162         struct btrfs_space_info *space_info;
6163         int loop = 0;
6164         int index = __get_raid_index(flags);
6165         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6166                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6167         bool failed_cluster_refill = false;
6168         bool failed_alloc = false;
6169         bool use_cluster = true;
6170         bool have_caching_bg = false;
6171
6172         WARN_ON(num_bytes < root->sectorsize);
6173         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6174         ins->objectid = 0;
6175         ins->offset = 0;
6176
6177         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6178
6179         space_info = __find_space_info(root->fs_info, flags);
6180         if (!space_info) {
6181                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6182                 return -ENOSPC;
6183         }
6184
6185         /*
6186          * If the space info is for both data and metadata it means we have a
6187          * small filesystem and we can't use the clustering stuff.
6188          */
6189         if (btrfs_mixed_space_info(space_info))
6190                 use_cluster = false;
6191
6192         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6193                 last_ptr = &root->fs_info->meta_alloc_cluster;
6194                 if (!btrfs_test_opt(root, SSD))
6195                         empty_cluster = 64 * 1024;
6196         }
6197
6198         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6199             btrfs_test_opt(root, SSD)) {
6200                 last_ptr = &root->fs_info->data_alloc_cluster;
6201         }
6202
6203         if (last_ptr) {
6204                 spin_lock(&last_ptr->lock);
6205                 if (last_ptr->block_group)
6206                         hint_byte = last_ptr->window_start;
6207                 spin_unlock(&last_ptr->lock);
6208         }
6209
6210         search_start = max(search_start, first_logical_byte(root, 0));
6211         search_start = max(search_start, hint_byte);
6212
6213         if (!last_ptr)
6214                 empty_cluster = 0;
6215
6216         if (search_start == hint_byte) {
6217                 block_group = btrfs_lookup_block_group(root->fs_info,
6218                                                        search_start);
6219                 /*
6220                  * we don't want to use the block group if it doesn't match our
6221          * allocation bits, or if it's not cached.
6222                  *
6223                  * However if we are re-searching with an ideal block group
6224                  * picked out then we don't care that the block group is cached.
6225                  */
6226                 if (block_group && block_group_bits(block_group, flags) &&
6227                     block_group->cached != BTRFS_CACHE_NO) {
6228                         down_read(&space_info->groups_sem);
6229                         if (list_empty(&block_group->list) ||
6230                             block_group->ro) {
6231                                 /*
6232                                  * someone is removing this block group,
6233                                  * we can't jump to the have_block_group
6234                                  * label because our list pointers are not
6235                                  * valid
6236                                  */
6237                                 btrfs_put_block_group(block_group);
6238                                 up_read(&space_info->groups_sem);
6239                         } else {
6240                                 index = get_block_group_index(block_group);
6241                                 goto have_block_group;
6242                         }
6243                 } else if (block_group) {
6244                         btrfs_put_block_group(block_group);
6245                 }
6246         }
6247 search:
6248         have_caching_bg = false;
6249         down_read(&space_info->groups_sem);
6250         list_for_each_entry(block_group, &space_info->block_groups[index],
6251                             list) {
6252                 u64 offset;
6253                 int cached;
6254
6255                 btrfs_get_block_group(block_group);
6256                 search_start = block_group->key.objectid;
6257
6258                 /*
6259                  * this can happen if we end up cycling through all the
6260                  * raid types, but we want to make sure we only allocate
6261                  * for the proper type.
6262                  */
6263                 if (!block_group_bits(block_group, flags)) {
6264                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6265                                     BTRFS_BLOCK_GROUP_RAID1 |
6266                                     BTRFS_BLOCK_GROUP_RAID5 |
6267                                     BTRFS_BLOCK_GROUP_RAID6 |
6268                                     BTRFS_BLOCK_GROUP_RAID10;
6269
6270                         /*
6271                          * if they asked for extra copies and this block group
6272                          * doesn't provide them, bail.  This does allow us to
6273                          * fill raid0 from raid1.
6274                          */
6275                         if ((flags & extra) && !(block_group->flags & extra))
6276                                 goto loop;
6277                 }
6278
6279 have_block_group:
6280                 cached = block_group_cache_done(block_group);
6281                 if (unlikely(!cached)) {
6282                         ret = cache_block_group(block_group, 0);
6283                         BUG_ON(ret < 0);
6284                         ret = 0;
6285                 }
6286
6287                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6288                         goto loop;
6289                 if (unlikely(block_group->ro))
6290                         goto loop;
6291
6292                 /*
6293                  * OK, we want to try to use the cluster allocator, so
6294                  * let's look there
6295                  */
6296                 if (last_ptr) {
6297                         struct btrfs_block_group_cache *used_block_group;
6298                         unsigned long aligned_cluster;
6299                         /*
6300                          * the refill lock keeps out other
6301                          * people trying to start a new cluster
6302                          */
6303                         spin_lock(&last_ptr->refill_lock);
6304                         used_block_group = last_ptr->block_group;
6305                         if (used_block_group != block_group &&
6306                             (!used_block_group ||
6307                              used_block_group->ro ||
6308                              !block_group_bits(used_block_group, flags)))
6309                                 goto refill_cluster;
6310
6311                         if (used_block_group != block_group)
6312                                 btrfs_get_block_group(used_block_group);
6313
6314                         offset = btrfs_alloc_from_cluster(used_block_group,
6315                                                 last_ptr,
6316                                                 num_bytes,
6317                                                 used_block_group->key.objectid,
6318                                                 &max_extent_size);
6319                         if (offset) {
6320                                 /* we have a block, we're done */
6321                                 spin_unlock(&last_ptr->refill_lock);
6322                                 trace_btrfs_reserve_extent_cluster(root,
6323                                                 used_block_group,
6324                                                 search_start, num_bytes);
6325                                 if (used_block_group != block_group) {
6326                                         btrfs_put_block_group(block_group);
6327                                         block_group = used_block_group;
6328                                 }
6329                                 goto checks;
6330                         }
6331
6332                         WARN_ON(last_ptr->block_group != used_block_group);
6333                         if (used_block_group != block_group)
6334                                 btrfs_put_block_group(used_block_group);
6335 refill_cluster:
6336                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6337                          * set up a new cluster, so let's just skip it
6338                          * and let the allocator find whatever block
6339                          * it can find.  If we reach this point, we
6340                          * will have tried the cluster allocator
6341                          * plenty of times and not have found
6342                          * anything, so we are likely way too
6343                          * fragmented for the clustering stuff to find
6344                          * anything.
6345                          *
6346                          * However, if the cluster is taken from the
6347                          * current block group, release the cluster
6348                          * first, so that we stand a better chance of
6349                          * succeeding in the unclustered
6350                          * allocation.  */
6351                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6352                             last_ptr->block_group != block_group) {
6353                                 spin_unlock(&last_ptr->refill_lock);
6354                                 goto unclustered_alloc;
6355                         }
6356
6357                         /*
6358                          * this cluster didn't work out, free it and
6359                          * start over
6360                          */
6361                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6362
6363                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6364                                 spin_unlock(&last_ptr->refill_lock);
6365                                 goto unclustered_alloc;
6366                         }
6367
6368                         aligned_cluster = max_t(unsigned long,
6369                                                 empty_cluster + empty_size,
6370                                               block_group->full_stripe_len);
6371
6372                         /* allocate a cluster in this block group */
6373                         ret = btrfs_find_space_cluster(root, block_group,
6374                                                        last_ptr, search_start,
6375                                                        num_bytes,
6376                                                        aligned_cluster);
6377                         if (ret == 0) {
6378                                 /*
6379                                  * now pull our allocation out of this
6380                                  * cluster
6381                                  */
6382                                 offset = btrfs_alloc_from_cluster(block_group,
6383                                                         last_ptr,
6384                                                         num_bytes,
6385                                                         search_start,
6386                                                         &max_extent_size);
6387                                 if (offset) {
6388                                         /* we found one, proceed */
6389                                         spin_unlock(&last_ptr->refill_lock);
6390                                         trace_btrfs_reserve_extent_cluster(root,
6391                                                 block_group, search_start,
6392                                                 num_bytes);
6393                                         goto checks;
6394                                 }
6395                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6396                                    && !failed_cluster_refill) {
6397                                 spin_unlock(&last_ptr->refill_lock);
6398
6399                                 failed_cluster_refill = true;
6400                                 wait_block_group_cache_progress(block_group,
6401                                        num_bytes + empty_cluster + empty_size);
6402                                 goto have_block_group;
6403                         }
6404
6405                         /*
6406                          * at this point we either didn't find a cluster
6407                          * or we weren't able to allocate a block from our
6408                          * cluster.  Free the cluster we've been trying
6409                          * to use, and go to the next block group
6410                          */
6411                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6412                         spin_unlock(&last_ptr->refill_lock);
6413                         goto loop;
6414                 }
6415
6416 unclustered_alloc:
6417                 spin_lock(&block_group->free_space_ctl->tree_lock);
6418                 if (cached &&
6419                     block_group->free_space_ctl->free_space <
6420                     num_bytes + empty_cluster + empty_size) {
6421                         if (block_group->free_space_ctl->free_space >
6422                             max_extent_size)
6423                                 max_extent_size =
6424                                         block_group->free_space_ctl->free_space;
6425                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6426                         goto loop;
6427                 }
6428                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6429
6430                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6431                                                     num_bytes, empty_size,
6432                                                     &max_extent_size);
6433                 /*
6434                  * If we didn't find a chunk, and we haven't failed on this
6435                  * block group before, and this block group is in the middle of
6436                  * caching and we are ok with waiting, then go ahead and wait
6437                  * for progress to be made, and set failed_alloc to true.
6438                  *
6439                  * If failed_alloc is true then we've already waited on this
6440                  * block group once and should move on to the next block group.
6441                  */
6442                 if (!offset && !failed_alloc && !cached &&
6443                     loop > LOOP_CACHING_NOWAIT) {
6444                         wait_block_group_cache_progress(block_group,
6445                                                 num_bytes + empty_size);
6446                         failed_alloc = true;
6447                         goto have_block_group;
6448                 } else if (!offset) {
6449                         if (!cached)
6450                                 have_caching_bg = true;
6451                         goto loop;
6452                 }
6453 checks:
6454                 search_start = stripe_align(root, block_group,
6455                                             offset, num_bytes);
6456
6457                 /* move on to the next group */
6458                 if (search_start + num_bytes >
6459                     block_group->key.objectid + block_group->key.offset) {
6460                         btrfs_add_free_space(block_group, offset, num_bytes);
6461                         goto loop;
6462                 }
6463
6464                 if (offset < search_start)
6465                         btrfs_add_free_space(block_group, offset,
6466                                              search_start - offset);
6467                 BUG_ON(offset > search_start);
6468
6469                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6470                                                   alloc_type);
6471                 if (ret == -EAGAIN) {
6472                         btrfs_add_free_space(block_group, offset, num_bytes);
6473                         goto loop;
6474                 }
6475
6476                 /* we are all good, let's return */
6477                 ins->objectid = search_start;
6478                 ins->offset = num_bytes;
6479
6480                 trace_btrfs_reserve_extent(orig_root, block_group,
6481                                            search_start, num_bytes);
6482                 btrfs_put_block_group(block_group);
6483                 break;
6484 loop:
6485                 failed_cluster_refill = false;
6486                 failed_alloc = false;
6487                 BUG_ON(index != get_block_group_index(block_group));
6488                 btrfs_put_block_group(block_group);
6489         }
6490         up_read(&space_info->groups_sem);
6491
6492         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6493                 goto search;
6494
6495         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6496                 goto search;
6497
6498         /*
6499          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6500          *                      caching kthreads as we move along
6501          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6502          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6503          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6504          *                      again
6505          */
6506         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6507                 index = 0;
6508                 loop++;
6509                 if (loop == LOOP_ALLOC_CHUNK) {
6510                         struct btrfs_trans_handle *trans;
6511
6512                         trans = btrfs_join_transaction(root);
6513                         if (IS_ERR(trans)) {
6514                                 ret = PTR_ERR(trans);
6515                                 goto out;
6516                         }
6517
6518                         ret = do_chunk_alloc(trans, root, flags,
6519                                              CHUNK_ALLOC_FORCE);
6520                         /*
6521                          * Do not bail out on ENOSPC since we
6522                          * can do more things.
6523                          */
6524                         if (ret < 0 && ret != -ENOSPC)
6525                                 btrfs_abort_transaction(trans,
6526                                                         root, ret);
6527                         else
6528                                 ret = 0;
6529                         btrfs_end_transaction(trans, root);
6530                         if (ret)
6531                                 goto out;
6532                 }
6533
6534                 if (loop == LOOP_NO_EMPTY_SIZE) {
6535                         empty_size = 0;
6536                         empty_cluster = 0;
6537                 }
6538
6539                 goto search;
6540         } else if (!ins->objectid) {
6541                 ret = -ENOSPC;
6542         } else {
6543                 ret = 0;
6544         }
6545 out:
6546         if (ret == -ENOSPC)
6547                 ins->offset = max_extent_size;
6548         return ret;
6549 }
6550
6551 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6552                             int dump_block_groups)
6553 {
6554         struct btrfs_block_group_cache *cache;
6555         int index = 0;
6556
6557         spin_lock(&info->lock);
6558         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6559                info->flags,
6560                info->total_bytes - info->bytes_used - info->bytes_pinned -
6561                info->bytes_reserved - info->bytes_readonly,
6562                (info->full) ? "" : "not ");
6563         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6564                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6565                info->total_bytes, info->bytes_used, info->bytes_pinned,
6566                info->bytes_reserved, info->bytes_may_use,
6567                info->bytes_readonly);
6568         spin_unlock(&info->lock);
6569
6570         if (!dump_block_groups)
6571                 return;
6572
6573         down_read(&info->groups_sem);
6574 again:
6575         list_for_each_entry(cache, &info->block_groups[index], list) {
6576                 spin_lock(&cache->lock);
6577                 printk(KERN_INFO "BTRFS: "
6578                            "block group %llu has %llu bytes, "
6579                            "%llu used %llu pinned %llu reserved %s\n",
6580                        cache->key.objectid, cache->key.offset,
6581                        btrfs_block_group_used(&cache->item), cache->pinned,
6582                        cache->reserved, cache->ro ? "[readonly]" : "");
6583                 btrfs_dump_free_space(cache, bytes);
6584                 spin_unlock(&cache->lock);
6585         }
6586         if (++index < BTRFS_NR_RAID_TYPES)
6587                 goto again;
6588         up_read(&info->groups_sem);
6589 }
6590
6591 int btrfs_reserve_extent(struct btrfs_root *root,
6592                          u64 num_bytes, u64 min_alloc_size,
6593                          u64 empty_size, u64 hint_byte,
6594                          struct btrfs_key *ins, int is_data)
6595 {
6596         bool final_tried = false;
6597         u64 flags;
6598         int ret;
6599
6600         flags = btrfs_get_alloc_profile(root, is_data);
6601 again:
6602         WARN_ON(num_bytes < root->sectorsize);
6603         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6604                                flags);
6605
6606         if (ret == -ENOSPC) {
6607                 if (!final_tried && ins->offset) {
6608                         num_bytes = min(num_bytes >> 1, ins->offset);
6609                         num_bytes = round_down(num_bytes, root->sectorsize);
6610                         num_bytes = max(num_bytes, min_alloc_size);
6611                         if (num_bytes == min_alloc_size)
6612                                 final_tried = true;
6613                         goto again;
6614                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6615                         struct btrfs_space_info *sinfo;
6616
6617                         sinfo = __find_space_info(root->fs_info, flags);
6618                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6619                                 flags, num_bytes);
6620                         if (sinfo)
6621                                 dump_space_info(sinfo, num_bytes, 1);
6622                 }
6623         }
6624
6625         return ret;
6626 }
6627
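/*
 * A worked example of the retry loop above (values are illustrative):
 * a 1MiB request with a min_alloc_size of 256KiB fails with -ENOSPC
 * while the allocator reports a 300KiB max_extent_size via
 * ins->offset. The retry asks for min(512KiB, 300KiB) rounded down to
 * the sectorsize, i.e. 300KiB on a 4KiB-sector fs. Each further
 * failure halves the request again until it is clamped to
 * min_alloc_size; at that point final_tried is set and the next
 * -ENOSPC is returned to the caller.
 */
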
6628 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6629                                         u64 start, u64 len, int pin)
6630 {
6631         struct btrfs_block_group_cache *cache;
6632         int ret = 0;
6633
6634         cache = btrfs_lookup_block_group(root->fs_info, start);
6635         if (!cache) {
6636                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6637                         start);
6638                 return -ENOSPC;
6639         }
6640
6641         if (btrfs_test_opt(root, DISCARD))
6642                 ret = btrfs_discard_extent(root, start, len, NULL);
6643
6644         if (pin)
6645                 pin_down_extent(root, cache, start, len, 1);
6646         else {
6647                 btrfs_add_free_space(cache, start, len);
6648                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6649         }
6650         btrfs_put_block_group(cache);
6651
6652         trace_btrfs_reserved_extent_free(root, start, len);
6653
6654         return ret;
6655 }
6656
6657 int btrfs_free_reserved_extent(struct btrfs_root *root,
6658                                         u64 start, u64 len)
6659 {
6660         return __btrfs_free_reserved_extent(root, start, len, 0);
6661 }
6662
6663 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6664                                        u64 start, u64 len)
6665 {
6666         return __btrfs_free_reserved_extent(root, start, len, 1);
6667 }
6668
6669 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6670                                       struct btrfs_root *root,
6671                                       u64 parent, u64 root_objectid,
6672                                       u64 flags, u64 owner, u64 offset,
6673                                       struct btrfs_key *ins, int ref_mod)
6674 {
6675         int ret;
6676         struct btrfs_fs_info *fs_info = root->fs_info;
6677         struct btrfs_extent_item *extent_item;
6678         struct btrfs_extent_inline_ref *iref;
6679         struct btrfs_path *path;
6680         struct extent_buffer *leaf;
6681         int type;
6682         u32 size;
6683
6684         if (parent > 0)
6685                 type = BTRFS_SHARED_DATA_REF_KEY;
6686         else
6687                 type = BTRFS_EXTENT_DATA_REF_KEY;
6688
6689         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6690
6691         path = btrfs_alloc_path();
6692         if (!path)
6693                 return -ENOMEM;
6694
6695         path->leave_spinning = 1;
6696         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6697                                       ins, size);
6698         if (ret) {
6699                 btrfs_free_path(path);
6700                 return ret;
6701         }
6702
6703         leaf = path->nodes[0];
6704         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6705                                      struct btrfs_extent_item);
6706         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6707         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6708         btrfs_set_extent_flags(leaf, extent_item,
6709                                flags | BTRFS_EXTENT_FLAG_DATA);
6710
6711         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6712         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6713         if (parent > 0) {
6714                 struct btrfs_shared_data_ref *ref;
6715                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6716                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6717                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6718         } else {
6719                 struct btrfs_extent_data_ref *ref;
6720                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6721                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6722                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6723                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6724                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6725         }
6726
6727         btrfs_mark_buffer_dirty(path->nodes[0]);
6728         btrfs_free_path(path);
6729
6730         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6731         if (ret) { /* -ENOENT, logic error */
6732                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6733                         ins->objectid, ins->offset);
6734                 BUG();
6735         }
6736         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6737         return ret;
6738 }
6739
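/*
 * A rough sketch of the extent tree item written above. For a shared
 * data extent (parent > 0):
 *
 *   [ btrfs_extent_item | iref: type=SHARED_DATA_REF offset=parent |
 *     btrfs_shared_data_ref: count ]
 *
 * For a keyed back reference the btrfs_extent_data_ref is overlaid
 * starting at iref->offset:
 *
 *   [ btrfs_extent_item | iref: type=EXTENT_DATA_REF |
 *     btrfs_extent_data_ref: root objectid offset count ]
 */
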
6740 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6741                                      struct btrfs_root *root,
6742                                      u64 parent, u64 root_objectid,
6743                                      u64 flags, struct btrfs_disk_key *key,
6744                                      int level, struct btrfs_key *ins)
6745 {
6746         int ret;
6747         struct btrfs_fs_info *fs_info = root->fs_info;
6748         struct btrfs_extent_item *extent_item;
6749         struct btrfs_tree_block_info *block_info;
6750         struct btrfs_extent_inline_ref *iref;
6751         struct btrfs_path *path;
6752         struct extent_buffer *leaf;
6753         u32 size = sizeof(*extent_item) + sizeof(*iref);
6754         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6755                                                  SKINNY_METADATA);
6756
6757         if (!skinny_metadata)
6758                 size += sizeof(*block_info);
6759
6760         path = btrfs_alloc_path();
6761         if (!path) {
6762                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6763                                                    root->leafsize);
6764                 return -ENOMEM;
6765         }
6766
6767         path->leave_spinning = 1;
6768         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6769                                       ins, size);
6770         if (ret) {
6771                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6772                                                    root->leafsize);
6773                 btrfs_free_path(path);
6774                 return ret;
6775         }
6776
6777         leaf = path->nodes[0];
6778         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6779                                      struct btrfs_extent_item);
6780         btrfs_set_extent_refs(leaf, extent_item, 1);
6781         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6782         btrfs_set_extent_flags(leaf, extent_item,
6783                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6784
6785         if (skinny_metadata) {
6786                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6787         } else {
6788                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6789                 btrfs_set_tree_block_key(leaf, block_info, key);
6790                 btrfs_set_tree_block_level(leaf, block_info, level);
6791                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6792         }
6793
6794         if (parent > 0) {
6795                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6796                 btrfs_set_extent_inline_ref_type(leaf, iref,
6797                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6798                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6799         } else {
6800                 btrfs_set_extent_inline_ref_type(leaf, iref,
6801                                                  BTRFS_TREE_BLOCK_REF_KEY);
6802                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6803         }
6804
6805         btrfs_mark_buffer_dirty(leaf);
6806         btrfs_free_path(path);
6807
6808         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6809         if (ret) { /* -ENOENT, logic error */
6810                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6811                         ins->objectid, ins->offset);
6812                 BUG();
6813         }
6814
6815         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6816         return ret;
6817 }
6818
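/*
 * The two on-disk layouts produced above, sketched:
 *
 *   skinny metadata:  [ extent_item | inline ref ]
 *   classic:          [ extent_item | tree_block_info (key + level) | inline ref ]
 *
 * With SKINNY_METADATA the level travels in the item key instead, so
 * the tree_block_info can be dropped entirely.
 */
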
6819 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6820                                      struct btrfs_root *root,
6821                                      u64 root_objectid, u64 owner,
6822                                      u64 offset, struct btrfs_key *ins)
6823 {
6824         int ret;
6825
6826         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6827
6828         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6829                                          ins->offset, 0,
6830                                          root_objectid, owner, offset,
6831                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6832         return ret;
6833 }
6834
6835 /*
6836  * this is used by the tree logging recovery code.  It records that
6837  * an extent has been allocated and makes sure to clear the free
6838  * space cache bits as well.
6839  */
6840 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6841                                    struct btrfs_root *root,
6842                                    u64 root_objectid, u64 owner, u64 offset,
6843                                    struct btrfs_key *ins)
6844 {
6845         int ret;
6846         struct btrfs_block_group_cache *block_group;
6847
6848         /*
6849          * Mixed block groups will exclude before processing the log, so we
6850          * only need to do the exclude dance if this fs isn't mixed.
6851          */
6852         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6853                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6854                 if (ret)
6855                         return ret;
6856         }
6857
6858         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6859         if (!block_group)
6860                 return -EINVAL;
6861
6862         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6863                                           RESERVE_ALLOC_NO_ACCOUNT);
6864         BUG_ON(ret); /* logic error */
6865         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6866                                          0, owner, offset, ins, 1);
6867         btrfs_put_block_group(block_group);
6868         return ret;
6869 }
6870
6871 static struct extent_buffer *
6872 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6873                       u64 bytenr, u32 blocksize, int level)
6874 {
6875         struct extent_buffer *buf;
6876
6877         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6878         if (!buf)
6879                 return ERR_PTR(-ENOMEM);
6880         btrfs_set_header_generation(buf, trans->transid);
6881         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6882         btrfs_tree_lock(buf);
6883         clean_tree_block(trans, root, buf);
6884         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6885
6886         btrfs_set_lock_blocking(buf);
6887         btrfs_set_buffer_uptodate(buf);
6888
6889         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6890                 /*
6891                  * we allow two log transactions at a time, so use a different
6892                  * EXTENT bit to differentiate the dirty pages.
6893                  */
6894                 if (root->log_transid % 2 == 0)
6895                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6896                                         buf->start + buf->len - 1, GFP_NOFS);
6897                 else
6898                         set_extent_new(&root->dirty_log_pages, buf->start,
6899                                         buf->start + buf->len - 1, GFP_NOFS);
6900         } else {
6901                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6902                          buf->start + buf->len - 1, GFP_NOFS);
6903         }
6904         trans->blocks_used++;
6905         /* this returns a buffer locked for blocking */
6906         return buf;
6907 }
6908
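/*
 * For example, a log tree at log_transid 6 marks its new pages with
 * EXTENT_DIRTY while transid 7 uses EXTENT_NEW, so the two in-flight
 * log commits never mistake each other's pages for their own.
 */
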
6909 static struct btrfs_block_rsv *
6910 use_block_rsv(struct btrfs_trans_handle *trans,
6911               struct btrfs_root *root, u32 blocksize)
6912 {
6913         struct btrfs_block_rsv *block_rsv;
6914         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6915         int ret;
6916         bool global_updated = false;
6917
6918         block_rsv = get_block_rsv(trans, root);
6919
6920         if (unlikely(block_rsv->size == 0))
6921                 goto try_reserve;
6922 again:
6923         ret = block_rsv_use_bytes(block_rsv, blocksize);
6924         if (!ret)
6925                 return block_rsv;
6926
6927         if (block_rsv->failfast)
6928                 return ERR_PTR(ret);
6929
6930         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6931                 global_updated = true;
6932                 update_global_block_rsv(root->fs_info);
6933                 goto again;
6934         }
6935
6936         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6937                 static DEFINE_RATELIMIT_STATE(_rs,
6938                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6939                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6940                 if (__ratelimit(&_rs))
6941                         WARN(1, KERN_DEBUG
6942                                 "BTRFS: block rsv returned %d\n", ret);
6943         }
6944 try_reserve:
6945         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6946                                      BTRFS_RESERVE_NO_FLUSH);
6947         if (!ret)
6948                 return block_rsv;
6949         /*
6950          * If we couldn't reserve metadata bytes, try to use some from
6951          * the global reserve, provided our rsv's space_info matches the
6952          * global reservation's.
6953          */
6954         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6955             block_rsv->space_info == global_rsv->space_info) {
6956                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6957                 if (!ret)
6958                         return global_rsv;
6959         }
6960         return ERR_PTR(ret);
6961 }
6962
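/*
 * The reservation fallback order above, in short:
 *
 *   1) consume bytes from the root's own block rsv
 *   2) if that rsv is the global one, refresh its size once and retry
 *   3) reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH
 *   4) as a last resort, steal from the global rsv when it shares the
 *      same space_info
 */
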
6963 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6964                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6965 {
6966         block_rsv_add_bytes(block_rsv, blocksize, 0);
6967         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6968 }
6969
6970 /*
6971  * finds a free extent and does all the dirty work required for
6972  * allocation; returns a tree buffer, locked for blocking, for the
6973  * first block of the extent.
6974  *
6975  * returns the tree buffer or an ERR_PTR on failure.
6976  */
6977 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6978                                         struct btrfs_root *root, u32 blocksize,
6979                                         u64 parent, u64 root_objectid,
6980                                         struct btrfs_disk_key *key, int level,
6981                                         u64 hint, u64 empty_size)
6982 {
6983         struct btrfs_key ins;
6984         struct btrfs_block_rsv *block_rsv;
6985         struct extent_buffer *buf;
6986         u64 flags = 0;
6987         int ret;
6988         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6989                                                  SKINNY_METADATA);
6990
6991         block_rsv = use_block_rsv(trans, root, blocksize);
6992         if (IS_ERR(block_rsv))
6993                 return ERR_CAST(block_rsv);
6994
6995         ret = btrfs_reserve_extent(root, blocksize, blocksize,
6996                                    empty_size, hint, &ins, 0);
6997         if (ret) {
6998                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6999                 return ERR_PTR(ret);
7000         }
7001
7002         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7003                                     blocksize, level);
7004         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7005
7006         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7007                 if (parent == 0)
7008                         parent = ins.objectid;
7009                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7010         } else
7011                 BUG_ON(parent > 0);
7012
7013         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7014                 struct btrfs_delayed_extent_op *extent_op;
7015                 extent_op = btrfs_alloc_delayed_extent_op();
7016                 BUG_ON(!extent_op); /* -ENOMEM */
7017                 if (key)
7018                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7019                 else
7020                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7021                 extent_op->flags_to_set = flags;
7022                 if (skinny_metadata)
7023                         extent_op->update_key = 0;
7024                 else
7025                         extent_op->update_key = 1;
7026                 extent_op->update_flags = 1;
7027                 extent_op->is_data = 0;
7028                 extent_op->level = level;
7029
7030                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7031                                         ins.objectid,
7032                                         ins.offset, parent, root_objectid,
7033                                         level, BTRFS_ADD_DELAYED_EXTENT,
7034                                         extent_op, 0);
7035                 BUG_ON(ret); /* -ENOMEM */
7036         }
7037         return buf;
7038 }
7039
7040 struct walk_control {
7041         u64 refs[BTRFS_MAX_LEVEL];
7042         u64 flags[BTRFS_MAX_LEVEL];
7043         struct btrfs_key update_progress;
7044         int stage;
7045         int level;
7046         int shared_level;
7047         int update_ref;
7048         int keep_locks;
7049         int reada_slot;
7050         int reada_count;
7051         int for_reloc;
7052 };
7053
7054 #define DROP_REFERENCE  1
7055 #define UPDATE_BACKREF  2
7056
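/*
 * A drop normally stays in the DROP_REFERENCE stage, dropping one
 * reference per block on the way down. When do_walk_down meets a
 * shared block whose backrefs may need rewriting, it switches to
 * UPDATE_BACKREF for the subtree rooted there; walk_up_proc switches
 * back to DROP_REFERENCE once that subtree has been converted to
 * full backrefs.
 */
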
7057 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7058                                      struct btrfs_root *root,
7059                                      struct walk_control *wc,
7060                                      struct btrfs_path *path)
7061 {
7062         u64 bytenr;
7063         u64 generation;
7064         u64 refs;
7065         u64 flags;
7066         u32 nritems;
7067         u32 blocksize;
7068         struct btrfs_key key;
7069         struct extent_buffer *eb;
7070         int ret;
7071         int slot;
7072         int nread = 0;
7073
7074         if (path->slots[wc->level] < wc->reada_slot) {
7075                 wc->reada_count = wc->reada_count * 2 / 3;
7076                 wc->reada_count = max(wc->reada_count, 2);
7077         } else {
7078                 wc->reada_count = wc->reada_count * 3 / 2;
7079                 wc->reada_count = min_t(int, wc->reada_count,
7080                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7081         }
7082
7083         eb = path->nodes[wc->level];
7084         nritems = btrfs_header_nritems(eb);
7085         blocksize = btrfs_level_size(root, wc->level - 1);
7086
7087         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7088                 if (nread >= wc->reada_count)
7089                         break;
7090
7091                 cond_resched();
7092                 bytenr = btrfs_node_blockptr(eb, slot);
7093                 generation = btrfs_node_ptr_generation(eb, slot);
7094
7095                 if (slot == path->slots[wc->level])
7096                         goto reada;
7097
7098                 if (wc->stage == UPDATE_BACKREF &&
7099                     generation <= root->root_key.offset)
7100                         continue;
7101
7102                 /* We don't lock the tree block, it's OK to be racy here */
7103                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7104                                                wc->level - 1, 1, &refs,
7105                                                &flags);
7106                 /* We don't care about errors in readahead. */
7107                 if (ret < 0)
7108                         continue;
7109                 BUG_ON(refs == 0);
7110
7111                 if (wc->stage == DROP_REFERENCE) {
7112                         if (refs == 1)
7113                                 goto reada;
7114
7115                         if (wc->level == 1 &&
7116                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7117                                 continue;
7118                         if (!wc->update_ref ||
7119                             generation <= root->root_key.offset)
7120                                 continue;
7121                         btrfs_node_key_to_cpu(eb, &key, slot);
7122                         ret = btrfs_comp_cpu_keys(&key,
7123                                                   &wc->update_progress);
7124                         if (ret < 0)
7125                                 continue;
7126                 } else {
7127                         if (wc->level == 1 &&
7128                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7129                                 continue;
7130                 }
7131 reada:
7132                 ret = readahead_tree_block(root, bytenr, blocksize,
7133                                            generation);
7134                 if (ret)
7135                         break;
7136                 nread++;
7137         }
7138         wc->reada_slot = slot;
7139 }
7140
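/*
 * The readahead window above adapts to how useful the last batch was:
 * for example a reada_count of 32 shrinks to 21 (x2/3, floor of 2)
 * while the walk is still behind the previous window, and grows to 48
 * (x3/2, capped at BTRFS_NODEPTRS_PER_BLOCK) once it has caught up.
 */
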
7141 /*
7142  * helper to process tree block while walking down the tree.
7143  *
7144  * when wc->stage == UPDATE_BACKREF, this function updates
7145  * back refs for pointers in the block.
7146  *
7147  * NOTE: return value 1 means we should stop walking down.
7148  */
7149 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7150                                    struct btrfs_root *root,
7151                                    struct btrfs_path *path,
7152                                    struct walk_control *wc, int lookup_info)
7153 {
7154         int level = wc->level;
7155         struct extent_buffer *eb = path->nodes[level];
7156         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7157         int ret;
7158
7159         if (wc->stage == UPDATE_BACKREF &&
7160             btrfs_header_owner(eb) != root->root_key.objectid)
7161                 return 1;
7162
7163         /*
7164          * when the reference count of a tree block is 1, it won't increase
7165          * again. once the full backref flag is set, we never clear it.
7166          */
7167         if (lookup_info &&
7168             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7169              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7170                 BUG_ON(!path->locks[level]);
7171                 ret = btrfs_lookup_extent_info(trans, root,
7172                                                eb->start, level, 1,
7173                                                &wc->refs[level],
7174                                                &wc->flags[level]);
7175                 BUG_ON(ret == -ENOMEM);
7176                 if (ret)
7177                         return ret;
7178                 BUG_ON(wc->refs[level] == 0);
7179         }
7180
7181         if (wc->stage == DROP_REFERENCE) {
7182                 if (wc->refs[level] > 1)
7183                         return 1;
7184
7185                 if (path->locks[level] && !wc->keep_locks) {
7186                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7187                         path->locks[level] = 0;
7188                 }
7189                 return 0;
7190         }
7191
7192         /* wc->stage == UPDATE_BACKREF */
7193         if (!(wc->flags[level] & flag)) {
7194                 BUG_ON(!path->locks[level]);
7195                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7196                 BUG_ON(ret); /* -ENOMEM */
7197                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7198                 BUG_ON(ret); /* -ENOMEM */
7199                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7200                                                   eb->len, flag,
7201                                                   btrfs_header_level(eb), 0);
7202                 BUG_ON(ret); /* -ENOMEM */
7203                 wc->flags[level] |= flag;
7204         }
7205
7206         /*
7207          * the block is shared by multiple trees, so it's not good to
7208          * keep the tree lock
7209          */
7210         if (path->locks[level] && level > 0) {
7211                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7212                 path->locks[level] = 0;
7213         }
7214         return 0;
7215 }
7216
7217 /*
7218  * helper to process tree block pointer.
7219  *
7220  * when wc->stage == DROP_REFERENCE, this function checks
7221  * reference count of the block pointed to. if the block
7222  * is shared and we need update back refs for the subtree
7223  * rooted at the block, this function changes wc->stage to
7224  * UPDATE_BACKREF. if the block is shared and there is no
7225  * need to update its back refs, this function drops the
7226  * reference to the block.
7227  *
7228  * NOTE: return value 1 means we should stop walking down.
7229  */
7230 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7231                                  struct btrfs_root *root,
7232                                  struct btrfs_path *path,
7233                                  struct walk_control *wc, int *lookup_info)
7234 {
7235         u64 bytenr;
7236         u64 generation;
7237         u64 parent;
7238         u32 blocksize;
7239         struct btrfs_key key;
7240         struct extent_buffer *next;
7241         int level = wc->level;
7242         int reada = 0;
7243         int ret = 0;
7244
7245         generation = btrfs_node_ptr_generation(path->nodes[level],
7246                                                path->slots[level]);
7247         /*
7248          * if the lower level block was created before the snapshot
7249          * was created, we know there is no need to update back refs
7250          * for the subtree
7251          */
7252         if (wc->stage == UPDATE_BACKREF &&
7253             generation <= root->root_key.offset) {
7254                 *lookup_info = 1;
7255                 return 1;
7256         }
7257
7258         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7259         blocksize = btrfs_level_size(root, level - 1);
7260
7261         next = btrfs_find_tree_block(root, bytenr, blocksize);
7262         if (!next) {
7263                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7264                 if (!next)
7265                         return -ENOMEM;
7266                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7267                                                level - 1);
7268                 reada = 1;
7269         }
7270         btrfs_tree_lock(next);
7271         btrfs_set_lock_blocking(next);
7272
7273         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7274                                        &wc->refs[level - 1],
7275                                        &wc->flags[level - 1]);
7276         if (ret < 0) {
7277                 btrfs_tree_unlock(next);
7278                 return ret;
7279         }
7280
7281         if (unlikely(wc->refs[level - 1] == 0)) {
7282                 btrfs_err(root->fs_info, "Missing references.");
7283                 BUG();
7284         }
7285         *lookup_info = 0;
7286
7287         if (wc->stage == DROP_REFERENCE) {
7288                 if (wc->refs[level - 1] > 1) {
7289                         if (level == 1 &&
7290                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7291                                 goto skip;
7292
7293                         if (!wc->update_ref ||
7294                             generation <= root->root_key.offset)
7295                                 goto skip;
7296
7297                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7298                                               path->slots[level]);
7299                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7300                         if (ret < 0)
7301                                 goto skip;
7302
7303                         wc->stage = UPDATE_BACKREF;
7304                         wc->shared_level = level - 1;
7305                 }
7306         } else {
7307                 if (level == 1 &&
7308                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7309                         goto skip;
7310         }
7311
7312         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7313                 btrfs_tree_unlock(next);
7314                 free_extent_buffer(next);
7315                 next = NULL;
7316                 *lookup_info = 1;
7317         }
7318
7319         if (!next) {
7320                 if (reada && level == 1)
7321                         reada_walk_down(trans, root, wc, path);
7322                 next = read_tree_block(root, bytenr, blocksize, generation);
7323                 if (!next || !extent_buffer_uptodate(next)) {
7324                         free_extent_buffer(next);
7325                         return -EIO;
7326                 }
7327                 btrfs_tree_lock(next);
7328                 btrfs_set_lock_blocking(next);
7329         }
7330
7331         level--;
7332         BUG_ON(level != btrfs_header_level(next));
7333         path->nodes[level] = next;
7334         path->slots[level] = 0;
7335         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7336         wc->level = level;
7337         if (wc->level == 1)
7338                 wc->reada_slot = 0;
7339         return 0;
7340 skip:
7341         wc->refs[level - 1] = 0;
7342         wc->flags[level - 1] = 0;
7343         if (wc->stage == DROP_REFERENCE) {
7344                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7345                         parent = path->nodes[level]->start;
7346                 } else {
7347                         BUG_ON(root->root_key.objectid !=
7348                                btrfs_header_owner(path->nodes[level]));
7349                         parent = 0;
7350                 }
7351
7352                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7353                                 root->root_key.objectid, level - 1, 0, 0);
7354                 BUG_ON(ret); /* -ENOMEM */
7355         }
7356         btrfs_tree_unlock(next);
7357         free_extent_buffer(next);
7358         *lookup_info = 1;
7359         return 1;
7360 }
7361
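/*
 * Return contract of do_walk_down, as consumed by walk_down_tree:
 * 0 means we descended one level (wc->level was updated), 1 means the
 * subtree was skipped (or its reference dropped) and the caller should
 * advance to the next slot, and < 0 is a hard error.
 */
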
7362 /*
7363  * helper to process tree block while walking up the tree.
7364  *
7365  * when wc->stage == DROP_REFERENCE, this function drops
7366  * reference count on the block.
7367  *
7368  * when wc->stage == UPDATE_BACKREF, this function changes
7369  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7370  * to UPDATE_BACKREF previously while processing the block.
7371  *
7372  * NOTE: return value 1 means we should stop walking up.
7373  */
7374 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7375                                  struct btrfs_root *root,
7376                                  struct btrfs_path *path,
7377                                  struct walk_control *wc)
7378 {
7379         int ret;
7380         int level = wc->level;
7381         struct extent_buffer *eb = path->nodes[level];
7382         u64 parent = 0;
7383
7384         if (wc->stage == UPDATE_BACKREF) {
7385                 BUG_ON(wc->shared_level < level);
7386                 if (level < wc->shared_level)
7387                         goto out;
7388
7389                 ret = find_next_key(path, level + 1, &wc->update_progress);
7390                 if (ret > 0)
7391                         wc->update_ref = 0;
7392
7393                 wc->stage = DROP_REFERENCE;
7394                 wc->shared_level = -1;
7395                 path->slots[level] = 0;
7396
7397                 /*
7398                  * check reference count again if the block isn't locked.
7399                  * we should start walking down the tree again if reference
7400                  * count is one.
7401                  */
7402                 if (!path->locks[level]) {
7403                         BUG_ON(level == 0);
7404                         btrfs_tree_lock(eb);
7405                         btrfs_set_lock_blocking(eb);
7406                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7407
7408                         ret = btrfs_lookup_extent_info(trans, root,
7409                                                        eb->start, level, 1,
7410                                                        &wc->refs[level],
7411                                                        &wc->flags[level]);
7412                         if (ret < 0) {
7413                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7414                                 path->locks[level] = 0;
7415                                 return ret;
7416                         }
7417                         BUG_ON(wc->refs[level] == 0);
7418                         if (wc->refs[level] == 1) {
7419                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7420                                 path->locks[level] = 0;
7421                                 return 1;
7422                         }
7423                 }
7424         }
7425
7426         /* wc->stage == DROP_REFERENCE */
7427         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7428
7429         if (wc->refs[level] == 1) {
7430                 if (level == 0) {
7431                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7432                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7433                                                     wc->for_reloc);
7434                         else
7435                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7436                                                     wc->for_reloc);
7437                         BUG_ON(ret); /* -ENOMEM */
7438                 }
7439                 /* make block locked assertion in clean_tree_block happy */
7440                 if (!path->locks[level] &&
7441                     btrfs_header_generation(eb) == trans->transid) {
7442                         btrfs_tree_lock(eb);
7443                         btrfs_set_lock_blocking(eb);
7444                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7445                 }
7446                 clean_tree_block(trans, root, eb);
7447         }
7448
7449         if (eb == root->node) {
7450                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7451                         parent = eb->start;
7452                 else
7453                         BUG_ON(root->root_key.objectid !=
7454                                btrfs_header_owner(eb));
7455         } else {
7456                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7457                         parent = path->nodes[level + 1]->start;
7458                 else
7459                         BUG_ON(root->root_key.objectid !=
7460                                btrfs_header_owner(path->nodes[level + 1]));
7461         }
7462
7463         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7464 out:
7465         wc->refs[level] = 0;
7466         wc->flags[level] = 0;
7467         return 0;
7468 }
7469
7470 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7471                                    struct btrfs_root *root,
7472                                    struct btrfs_path *path,
7473                                    struct walk_control *wc)
7474 {
7475         int level = wc->level;
7476         int lookup_info = 1;
7477         int ret;
7478
7479         while (level >= 0) {
7480                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7481                 if (ret > 0)
7482                         break;
7483
7484                 if (level == 0)
7485                         break;
7486
7487                 if (path->slots[level] >=
7488                     btrfs_header_nritems(path->nodes[level]))
7489                         break;
7490
7491                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7492                 if (ret > 0) {
7493                         path->slots[level]++;
7494                         continue;
7495                 } else if (ret < 0)
7496                         return ret;
7497                 level = wc->level;
7498         }
7499         return 0;
7500 }
7501
7502 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7503                                  struct btrfs_root *root,
7504                                  struct btrfs_path *path,
7505                                  struct walk_control *wc, int max_level)
7506 {
7507         int level = wc->level;
7508         int ret;
7509
7510         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7511         while (level < max_level && path->nodes[level]) {
7512                 wc->level = level;
7513                 if (path->slots[level] + 1 <
7514                     btrfs_header_nritems(path->nodes[level])) {
7515                         path->slots[level]++;
7516                         return 0;
7517                 } else {
7518                         ret = walk_up_proc(trans, root, path, wc);
7519                         if (ret > 0)
7520                                 return 0;
7521
7522                         if (path->locks[level]) {
7523                                 btrfs_tree_unlock_rw(path->nodes[level],
7524                                                      path->locks[level]);
7525                                 path->locks[level] = 0;
7526                         }
7527                         free_extent_buffer(path->nodes[level]);
7528                         path->nodes[level] = NULL;
7529                         level++;
7530                 }
7531         }
7532         return 1;
7533 }
7534
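/*
 * walk_up_tree pairs with walk_down_tree: it returns 0 as soon as a
 * new slot is ready somewhere below max_level (so the caller walks
 * down again) and 1 once every node up to max_level has been
 * processed, which ends the main loop in btrfs_drop_snapshot.
 */
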
7535 /*
7536  * drop a subvolume tree.
7537  *
7538  * this function traverses the tree freeing any blocks that are only
7539  * referenced by the tree.
7540  *
7541  * when a shared tree block is found, this function decreases its
7542  * reference count by one. if update_ref is true, this function
7543  * also makes sure backrefs for the shared block and all lower level
7544  * blocks are properly updated.
7545  *
7546  * If called with for_reloc == 0, may exit early with -EAGAIN
7547  */
7548 int btrfs_drop_snapshot(struct btrfs_root *root,
7549                          struct btrfs_block_rsv *block_rsv, int update_ref,
7550                          int for_reloc)
7551 {
7552         struct btrfs_path *path;
7553         struct btrfs_trans_handle *trans;
7554         struct btrfs_root *tree_root = root->fs_info->tree_root;
7555         struct btrfs_root_item *root_item = &root->root_item;
7556         struct walk_control *wc;
7557         struct btrfs_key key;
7558         int err = 0;
7559         int ret;
7560         int level;
7561         bool root_dropped = false;
7562
7563         path = btrfs_alloc_path();
7564         if (!path) {
7565                 err = -ENOMEM;
7566                 goto out;
7567         }
7568
7569         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7570         if (!wc) {
7571                 btrfs_free_path(path);
7572                 err = -ENOMEM;
7573                 goto out;
7574         }
7575
7576         trans = btrfs_start_transaction(tree_root, 0);
7577         if (IS_ERR(trans)) {
7578                 err = PTR_ERR(trans);
7579                 goto out_free;
7580         }
7581
7582         if (block_rsv)
7583                 trans->block_rsv = block_rsv;
7584
7585         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7586                 level = btrfs_header_level(root->node);
7587                 path->nodes[level] = btrfs_lock_root_node(root);
7588                 btrfs_set_lock_blocking(path->nodes[level]);
7589                 path->slots[level] = 0;
7590                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7591                 memset(&wc->update_progress, 0,
7592                        sizeof(wc->update_progress));
7593         } else {
7594                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7595                 memcpy(&wc->update_progress, &key,
7596                        sizeof(wc->update_progress));
7597
7598                 level = root_item->drop_level;
7599                 BUG_ON(level == 0);
7600                 path->lowest_level = level;
7601                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7602                 path->lowest_level = 0;
7603                 if (ret < 0) {
7604                         err = ret;
7605                         goto out_end_trans;
7606                 }
7607                 WARN_ON(ret > 0);
7608
7609                 /*
7610                  * unlock our path, this is safe because only this
7611                  * function is allowed to delete this snapshot
7612                  */
7613                 btrfs_unlock_up_safe(path, 0);
7614
7615                 level = btrfs_header_level(root->node);
7616                 while (1) {
7617                         btrfs_tree_lock(path->nodes[level]);
7618                         btrfs_set_lock_blocking(path->nodes[level]);
7619                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7620
7621                         ret = btrfs_lookup_extent_info(trans, root,
7622                                                 path->nodes[level]->start,
7623                                                 level, 1, &wc->refs[level],
7624                                                 &wc->flags[level]);
7625                         if (ret < 0) {
7626                                 err = ret;
7627                                 goto out_end_trans;
7628                         }
7629                         BUG_ON(wc->refs[level] == 0);
7630
7631                         if (level == root_item->drop_level)
7632                                 break;
7633
7634                         btrfs_tree_unlock(path->nodes[level]);
7635                         path->locks[level] = 0;
7636                         WARN_ON(wc->refs[level] != 1);
7637                         level--;
7638                 }
7639         }
7640
7641         wc->level = level;
7642         wc->shared_level = -1;
7643         wc->stage = DROP_REFERENCE;
7644         wc->update_ref = update_ref;
7645         wc->keep_locks = 0;
7646         wc->for_reloc = for_reloc;
7647         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7648
7649         while (1) {
7651                 ret = walk_down_tree(trans, root, path, wc);
7652                 if (ret < 0) {
7653                         err = ret;
7654                         break;
7655                 }
7656
7657                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7658                 if (ret < 0) {
7659                         err = ret;
7660                         break;
7661                 }
7662
7663                 if (ret > 0) {
7664                         BUG_ON(wc->stage != DROP_REFERENCE);
7665                         break;
7666                 }
7667
7668                 if (wc->stage == DROP_REFERENCE) {
7669                         level = wc->level;
7670                         btrfs_node_key(path->nodes[level],
7671                                        &root_item->drop_progress,
7672                                        path->slots[level]);
7673                         root_item->drop_level = level;
7674                 }
7675
7676                 BUG_ON(wc->level == 0);
7677                 if (btrfs_should_end_transaction(trans, tree_root) ||
7678                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7679                         ret = btrfs_update_root(trans, tree_root,
7680                                                 &root->root_key,
7681                                                 root_item);
7682                         if (ret) {
7683                                 btrfs_abort_transaction(trans, tree_root, ret);
7684                                 err = ret;
7685                                 goto out_end_trans;
7686                         }
7687
7688                         btrfs_end_transaction_throttle(trans, tree_root);
7689                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7690                                 pr_debug("BTRFS: drop snapshot early exit\n");
7691                                 err = -EAGAIN;
7692                                 goto out_free;
7693                         }
7694
7695                         trans = btrfs_start_transaction(tree_root, 0);
7696                         if (IS_ERR(trans)) {
7697                                 err = PTR_ERR(trans);
7698                                 goto out_free;
7699                         }
7700                         if (block_rsv)
7701                                 trans->block_rsv = block_rsv;
7702                 }
7703         }
7704         btrfs_release_path(path);
7705         if (err)
7706                 goto out_end_trans;
7707
7708         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7709         if (ret) {
7710                 btrfs_abort_transaction(trans, tree_root, ret);
7711                 goto out_end_trans;
7712         }
7713
7714         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7715                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7716                                       NULL, NULL);
7717                 if (ret < 0) {
7718                         btrfs_abort_transaction(trans, tree_root, ret);
7719                         err = ret;
7720                         goto out_end_trans;
7721                 } else if (ret > 0) {
7722                         /* if we fail to delete the orphan item this time
7723                          * around, it'll get picked up the next time.
7724                          *
7725                          * The most common failure here is just -ENOENT.
7726                          */
7727                         btrfs_del_orphan_item(trans, tree_root,
7728                                               root->root_key.objectid);
7729                 }
7730         }
7731
7732         if (root->in_radix) {
7733                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7734         } else {
7735                 free_extent_buffer(root->node);
7736                 free_extent_buffer(root->commit_root);
7737                 btrfs_put_fs_root(root);
7738         }
7739         root_dropped = true;
7740 out_end_trans:
7741         btrfs_end_transaction_throttle(trans, tree_root);
7742 out_free:
7743         kfree(wc);
7744         btrfs_free_path(path);
7745 out:
7746         /*
7747          * So if we need to stop dropping the snapshot for whatever reason, we
7748          * need to make sure to add it back to the dead root list so that we
7749          * keep trying to do the work later.  This also cleans up roots if we
7750          * don't have it in the radix (like when we recover after a power fail
7751          * or unmount) so we don't leak memory.
7752          */
7753         if (!for_reloc && root_dropped == false)
7754                 btrfs_add_dead_root(root);
7755         if (err && err != -EAGAIN)
7756                 btrfs_std_error(root->fs_info, err);
7757         return err;
7758 }
7759
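/*
 * A sketch of how the resume machinery above behaves: when the loop
 * has to end its transaction mid-walk, it stores the node key at the
 * current slot in drop_progress and the level in drop_level. On
 * re-entry (e.g. after the cleaner thread backed off or the fs was
 * remounted), the nonzero drop_progress sends us down the else branch,
 * where btrfs_search_slot() re-seeds the path at exactly that key and
 * level before the walk continues.
 */
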
7760 /*
7761  * drop subtree rooted at tree block 'node'.
7762  *
7763  * NOTE: this function will unlock and release tree block 'node'.
7764  * it is only used by the relocation code.
7765  */
7766 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7767                         struct btrfs_root *root,
7768                         struct extent_buffer *node,
7769                         struct extent_buffer *parent)
7770 {
7771         struct btrfs_path *path;
7772         struct walk_control *wc;
7773         int level;
7774         int parent_level;
7775         int ret = 0;
7776         int wret;
7777
7778         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7779
7780         path = btrfs_alloc_path();
7781         if (!path)
7782                 return -ENOMEM;
7783
7784         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7785         if (!wc) {
7786                 btrfs_free_path(path);
7787                 return -ENOMEM;
7788         }
7789
7790         btrfs_assert_tree_locked(parent);
7791         parent_level = btrfs_header_level(parent);
7792         extent_buffer_get(parent);
7793         path->nodes[parent_level] = parent;
7794         path->slots[parent_level] = btrfs_header_nritems(parent);
7795
7796         btrfs_assert_tree_locked(node);
7797         level = btrfs_header_level(node);
7798         path->nodes[level] = node;
7799         path->slots[level] = 0;
7800         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7801
7802         wc->refs[parent_level] = 1;
7803         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7804         wc->level = level;
7805         wc->shared_level = -1;
7806         wc->stage = DROP_REFERENCE;
7807         wc->update_ref = 0;
7808         wc->keep_locks = 1;
7809         wc->for_reloc = 1;
7810         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7811
7812         while (1) {
7813                 wret = walk_down_tree(trans, root, path, wc);
7814                 if (wret < 0) {
7815                         ret = wret;
7816                         break;
7817                 }
7818
7819                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7820                 if (wret < 0)
7821                         ret = wret;
7822                 if (wret != 0)
7823                         break;
7824         }
7825
7826         kfree(wc);
7827         btrfs_free_path(path);
7828         return ret;
7829 }
7830
7831 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7832 {
7833         u64 num_devices;
7834         u64 stripped;
7835
7836         /*
7837          * if restripe for this chunk_type is on, pick the target profile
7838          * and return; otherwise do the usual balance.
7839          */
7840         stripped = get_restripe_target(root->fs_info, flags);
7841         if (stripped)
7842                 return extended_to_chunk(stripped);
7843
7844         /*
7845          * we add in the count of missing devices because we want
7846          * to make sure that any RAID levels on a degraded FS
7847          * continue to be honored.
7848          */
7849         num_devices = root->fs_info->fs_devices->rw_devices +
7850                 root->fs_info->fs_devices->missing_devices;
7851
7852         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7853                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7854                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7855
7856         if (num_devices == 1) {
7857                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7858                 stripped = flags & ~stripped;
7859
7860                 /* turn raid0 into single device chunks */
7861                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7862                         return stripped;
7863
7864                 /* turn mirroring into duplication */
7865                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7866                              BTRFS_BLOCK_GROUP_RAID10))
7867                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7868         } else {
7869                 /* they already had raid on here, just return */
7870                 if (flags & stripped)
7871                         return flags;
7872
7873                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7874                 stripped = flags & ~stripped;
7875
7876                 /* switch duplicated blocks with raid1 */
7877                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7878                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7879
7880                 /* this is drive concat, leave it alone */
7881         }
7882
7883         return flags;
7884 }
7885
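/*
 * Concrete examples of the reduction above: on a one-device fs, RAID1
 * and RAID10 chunks come back as DUP and RAID0 chunks as single; with
 * two or more devices (counting missing ones), DUP chunks are promoted
 * to RAID1, and anything already striped or mirrored is returned
 * unchanged.
 */
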
7886 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7887 {
7888         struct btrfs_space_info *sinfo = cache->space_info;
7889         u64 num_bytes;
7890         u64 min_allocable_bytes;
7891         int ret = -ENOSPC;
7892
7894         /*
7895          * We need some metadata space and system metadata space for
7896          * allocating chunks in some corner cases, so keep a minimum
7897          * headroom unless we are forced to set the group readonly.
7898          */
7899         if ((sinfo->flags &
7900              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7901             !force)
7902                 min_allocable_bytes = 1 * 1024 * 1024;
7903         else
7904                 min_allocable_bytes = 0;
7905
7906         spin_lock(&sinfo->lock);
7907         spin_lock(&cache->lock);
7908
7909         if (cache->ro) {
7910                 ret = 0;
7911                 goto out;
7912         }
7913
7914         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7915                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7916
7917         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7918             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7919             min_allocable_bytes <= sinfo->total_bytes) {
7920                 sinfo->bytes_readonly += num_bytes;
7921                 cache->ro = 1;
7922                 ret = 0;
7923         }
7924 out:
7925         spin_unlock(&cache->lock);
7926         spin_unlock(&sinfo->lock);
7927         return ret;
7928 }
7929
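/*
 * Numerically, the check above: a 1GiB block group with 200MiB used
 * and nothing reserved or pinned leaves roughly 824MiB (ignoring
 * bytes_super) that can never be allocated again once the group is
 * read-only; the group only flips if the space_info can absorb that
 * amount, plus the optional 1MiB headroom, without exceeding
 * total_bytes.
 */
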
7930 int btrfs_set_block_group_ro(struct btrfs_root *root,
7931                              struct btrfs_block_group_cache *cache)
7933 {
7934         struct btrfs_trans_handle *trans;
7935         u64 alloc_flags;
7936         int ret;
7937
7938         BUG_ON(cache->ro);
7939
7940         trans = btrfs_join_transaction(root);
7941         if (IS_ERR(trans))
7942                 return PTR_ERR(trans);
7943
7944         alloc_flags = update_block_group_flags(root, cache->flags);
7945         if (alloc_flags != cache->flags) {
7946                 ret = do_chunk_alloc(trans, root, alloc_flags,
7947                                      CHUNK_ALLOC_FORCE);
7948                 if (ret < 0)
7949                         goto out;
7950         }
7951
7952         ret = set_block_group_ro(cache, 0);
7953         if (!ret)
7954                 goto out;
7955         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7956         ret = do_chunk_alloc(trans, root, alloc_flags,
7957                              CHUNK_ALLOC_FORCE);
7958         if (ret < 0)
7959                 goto out;
7960         ret = set_block_group_ro(cache, 0);
7961 out:
7962         btrfs_end_transaction(trans, root);
7963         return ret;
7964 }
7965
7966 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7967                             struct btrfs_root *root, u64 type)
7968 {
7969         u64 alloc_flags = get_alloc_profile(root, type);
7970         return do_chunk_alloc(trans, root, alloc_flags,
7971                               CHUNK_ALLOC_FORCE);
7972 }
7973
7974 /*
7975  * helper to account the unused space of all the readonly block groups in
7976  * the list. takes mirrors into account.
7977  */
7978 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7979 {
7980         struct btrfs_block_group_cache *block_group;
7981         u64 free_bytes = 0;
7982         int factor;
7983
7984         list_for_each_entry(block_group, groups_list, list) {
7985                 spin_lock(&block_group->lock);
7986
7987                 if (!block_group->ro) {
7988                         spin_unlock(&block_group->lock);
7989                         continue;
7990                 }
7991
7992                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7993                                           BTRFS_BLOCK_GROUP_RAID10 |
7994                                           BTRFS_BLOCK_GROUP_DUP))
7995                         factor = 2;
7996                 else
7997                         factor = 1;
7998
7999                 free_bytes += (block_group->key.offset -
8000                                btrfs_block_group_used(&block_group->item)) *
8001                                factor;
8002
8003                 spin_unlock(&block_group->lock);
8004         }
8005
8006         return free_bytes;
8007 }
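/*
 * For example, a read-only 1GiB RAID1 block group with 300MiB used
 * reports (1GiB - 300MiB) * 2 above, because each free byte still
 * occupies space on two devices.
 */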
8008
8009 /*
8010  * helper to account the unused space of all the read-only block groups in
8011  * the space_info.  Takes mirrors into account.
8012  */
8013 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8014 {
8015         int i;
8016         u64 free_bytes = 0;
8017
8018         spin_lock(&sinfo->lock);
8019
8020         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8021                 if (!list_empty(&sinfo->block_groups[i]))
8022                         free_bytes += __btrfs_get_ro_block_group_free_space(
8023                                                 &sinfo->block_groups[i]);
8024
8025         spin_unlock(&sinfo->lock);
8026
8027         return free_bytes;
8028 }
8029
8030 void btrfs_set_block_group_rw(struct btrfs_root *root,
8031                               struct btrfs_block_group_cache *cache)
8032 {
8033         struct btrfs_space_info *sinfo = cache->space_info;
8034         u64 num_bytes;
8035
8036         BUG_ON(!cache->ro);
8037
8038         spin_lock(&sinfo->lock);
8039         spin_lock(&cache->lock);
8040         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8041                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8042         sinfo->bytes_readonly -= num_bytes;
8043         cache->ro = 0;
8044         spin_unlock(&cache->lock);
8045         spin_unlock(&sinfo->lock);
8046 }
8047
8048 /*
8049  * checks to see if it's even possible to relocate this block group.
8050  *
8051  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8052  * it's ok to go ahead and try.
8053  */
8054 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8055 {
8056         struct btrfs_block_group_cache *block_group;
8057         struct btrfs_space_info *space_info;
8058         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8059         struct btrfs_device *device;
8060         struct btrfs_trans_handle *trans;
8061         u64 min_free;
8062         u64 dev_min = 1;
8063         u64 dev_nr = 0;
8064         u64 target;
8065         int index;
8066         int full = 0;
8067         int ret = 0;
8068
8069         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8070
8071         /* odd, couldn't find the block group, leave it alone */
8072         if (!block_group)
8073                 return -1;
8074
8075         min_free = btrfs_block_group_used(&block_group->item);
8076
8077         /* no bytes used, we're good */
8078         if (!min_free)
8079                 goto out;
8080
8081         space_info = block_group->space_info;
8082         spin_lock(&space_info->lock);
8083
8084         full = space_info->full;
8085
8086         /*
8087          * if this is the last block group we have in this space, we can't
8088          * relocate it unless we're able to allocate a new chunk below.
8089          *
8090          * Otherwise, we need to make sure we have room in the space to handle
8091          * all of the extents from this block group.  If we can, we're good.
8092          */
8093         if ((space_info->total_bytes != block_group->key.offset) &&
8094             (space_info->bytes_used + space_info->bytes_reserved +
8095              space_info->bytes_pinned + space_info->bytes_readonly +
8096              min_free < space_info->total_bytes)) {
8097                 spin_unlock(&space_info->lock);
8098                 goto out;
8099         }
8100         spin_unlock(&space_info->lock);
8101
8102         /*
8103          * ok we don't have enough space, but maybe we have free space on our
8104          * devices to allocate new chunks for relocation, so loop through our
8105          * alloc devices and guess if we have enough space.  if this block
8106          * group is going to be restriped, run checks against the target
8107          * profile instead of the current one.
8108          */
8109         ret = -1;
8110
8111         /*
8112          * index:
8113          *      0: raid10
8114          *      1: raid1
8115          *      2: dup
8116          *      3: raid0
8117          *      4: single  (5: raid5, 6: raid6)
8118          */
8119         target = get_restripe_target(root->fs_info, block_group->flags);
8120         if (target) {
8121                 index = __get_raid_index(extended_to_chunk(target));
8122         } else {
8123                 /*
8124                  * this is just a balance, so if we were marked as full
8125                  * we know there is no space for a new chunk
8126                  */
8127                 if (full)
8128                         goto out;
8129
8130                 index = get_block_group_index(block_group);
8131         }
8132
8133         if (index == BTRFS_RAID_RAID10) {
8134                 dev_min = 4;
8135                 /* Divide by 2 */
8136                 min_free >>= 1;
8137         } else if (index == BTRFS_RAID_RAID1) {
8138                 dev_min = 2;
8139         } else if (index == BTRFS_RAID_DUP) {
8140                 /* Multiply by 2 */
8141                 min_free <<= 1;
8142         } else if (index == BTRFS_RAID_RAID0) {
8143                 dev_min = fs_devices->rw_devices;
8144                 do_div(min_free, dev_min);
8145         }
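        /*
         * The adjustments above turn min_free into a per-device amount:
         * RAID10 stripes the data, so each of the (at least 4) devices
         * needs only half of it; RAID1 needs a full copy on each of 2
         * devices; DUP keeps both copies on one device, doubling the
         * need; RAID0 spreads it evenly across all rw devices.
         */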
8146
8147         /* We need to do this so that we can look at pending chunks */
8148         trans = btrfs_join_transaction(root);
8149         if (IS_ERR(trans)) {
8150                 ret = PTR_ERR(trans);
8151                 goto out;
8152         }
8153
8154         mutex_lock(&root->fs_info->chunk_mutex);
8155         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8156                 u64 dev_offset;
8157
8158                 /*
8159                  * check to make sure we can actually find a chunk with enough
8160                  * space to fit our block group in.
8161                  */
8162                 if (device->total_bytes > device->bytes_used + min_free &&
8163                     !device->is_tgtdev_for_dev_replace) {
8164                         ret = find_free_dev_extent(trans, device, min_free,
8165                                                    &dev_offset, NULL);
8166                         if (!ret)
8167                                 dev_nr++;
8168
8169                         if (dev_nr >= dev_min)
8170                                 break;
8171
8172                         ret = -1;
8173                 }
8174         }
8175         mutex_unlock(&root->fs_info->chunk_mutex);
8176         btrfs_end_transaction(trans, root);
8177 out:
8178         btrfs_put_block_group(block_group);
8179         return ret;
8180 }
8181
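/*
 * Position @path at the first BLOCK_GROUP_ITEM whose objectid is
 * >= key->objectid.  Returns 0 if one is found, > 0 if there is none,
 * and < 0 on error.
 */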
8182 static int find_first_block_group(struct btrfs_root *root,
8183                 struct btrfs_path *path, struct btrfs_key *key)
8184 {
8185         int ret = 0;
8186         struct btrfs_key found_key;
8187         struct extent_buffer *leaf;
8188         int slot;
8189
8190         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8191         if (ret < 0)
8192                 goto out;
8193
8194         while (1) {
8195                 slot = path->slots[0];
8196                 leaf = path->nodes[0];
8197                 if (slot >= btrfs_header_nritems(leaf)) {
8198                         ret = btrfs_next_leaf(root, path);
8199                         if (ret == 0)
8200                                 continue;
8201                         if (ret < 0)
8202                                 goto out;
8203                         break;
8204                 }
8205                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8206
8207                 if (found_key.objectid >= key->objectid &&
8208                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8209                         ret = 0;
8210                         goto out;
8211                 }
8212                 path->slots[0]++;
8213         }
8214 out:
8215         return ret;
8216 }
8217
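/*
 * Release the inode references that block groups hold on their free
 * space cache inodes, so the inodes can be evicted during unmount.
 */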
8218 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8219 {
8220         struct btrfs_block_group_cache *block_group;
8221         u64 last = 0;
8222
8223         while (1) {
8224                 struct inode *inode;
8225
8226                 block_group = btrfs_lookup_first_block_group(info, last);
8227                 while (block_group) {
8228                         spin_lock(&block_group->lock);
8229                         if (block_group->iref)
8230                                 break;
8231                         spin_unlock(&block_group->lock);
8232                         block_group = next_block_group(info->tree_root,
8233                                                        block_group);
8234                 }
8235                 if (!block_group) {
8236                         if (last == 0)
8237                                 break;
8238                         last = 0;
8239                         continue;
8240                 }
8241
8242                 inode = block_group->inode;
8243                 block_group->iref = 0;
8244                 block_group->inode = NULL;
8245                 spin_unlock(&block_group->lock);
8246                 iput(inode);
8247                 last = block_group->key.objectid + block_group->key.offset;
8248                 btrfs_put_block_group(block_group);
8249         }
8250 }
8251
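/*
 * Final teardown of all block group caches and space_infos; only called
 * in the late stages of unmount when nothing else can be using them.
 */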
8252 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8253 {
8254         struct btrfs_block_group_cache *block_group;
8255         struct btrfs_space_info *space_info;
8256         struct btrfs_caching_control *caching_ctl;
8257         struct rb_node *n;
8258
8259         down_write(&info->commit_root_sem);
8260         while (!list_empty(&info->caching_block_groups)) {
8261                 caching_ctl = list_entry(info->caching_block_groups.next,
8262                                          struct btrfs_caching_control, list);
8263                 list_del(&caching_ctl->list);
8264                 put_caching_control(caching_ctl);
8265         }
8266         up_write(&info->commit_root_sem);
8267
8268         spin_lock(&info->block_group_cache_lock);
8269         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8270                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8271                                        cache_node);
8272                 rb_erase(&block_group->cache_node,
8273                          &info->block_group_cache_tree);
8274                 spin_unlock(&info->block_group_cache_lock);
8275
8276                 down_write(&block_group->space_info->groups_sem);
8277                 list_del(&block_group->list);
8278                 up_write(&block_group->space_info->groups_sem);
8279
8280                 if (block_group->cached == BTRFS_CACHE_STARTED)
8281                         wait_block_group_cache_done(block_group);
8282
8283                 /*
8284                  * If we haven't cached this block group, we may still have
8285                  * excluded extents pinned for it, so drop them here.
8286                  */
8287                 if (block_group->cached == BTRFS_CACHE_NO ||
8288                     block_group->cached == BTRFS_CACHE_ERROR)
8289                         free_excluded_extents(info->extent_root, block_group);
8290
8291                 btrfs_remove_free_space_cache(block_group);
8292                 btrfs_put_block_group(block_group);
8293
8294                 spin_lock(&info->block_group_cache_lock);
8295         }
8296         spin_unlock(&info->block_group_cache_lock);
8297
8298         /* now that all the block groups are freed, go through and
8299          * free all the space_info structs.  This is only called during
8300          * the final stages of unmount, and so we know nobody is
8301          * using them.  We call synchronize_rcu() once before we start,
8302          * just to be on the safe side.
8303          */
8304         synchronize_rcu();
8305
8306         release_global_block_rsv(info);
8307
8308         while (!list_empty(&info->space_info)) {
8309                 int i;
8310
8311                 space_info = list_entry(info->space_info.next,
8312                                         struct btrfs_space_info,
8313                                         list);
8314                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8315                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8316                             space_info->bytes_reserved > 0 ||
8317                             space_info->bytes_may_use > 0)) {
8318                                 dump_space_info(space_info, 0, 0);
8319                         }
8320                 }
8321                 list_del(&space_info->list);
8322                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8323                         struct kobject *kobj;
8324                         kobj = &space_info->block_group_kobjs[i];
8325                         if (kobj->parent) {
8326                                 kobject_del(kobj);
8327                                 kobject_put(kobj);
8328                         }
8329                 }
8330                 kobject_del(&space_info->kobj);
8331                 kobject_put(&space_info->kobj);
8332         }
8333         return 0;
8334 }
8335
8336 static void __link_block_group(struct btrfs_space_info *space_info,
8337                                struct btrfs_block_group_cache *cache)
8338 {
8339         int index = get_block_group_index(cache);
8340         bool first = false;
8341
8342         down_write(&space_info->groups_sem);
8343         if (list_empty(&space_info->block_groups[index]))
8344                 first = true;
8345         list_add_tail(&cache->list, &space_info->block_groups[index]);
8346         up_write(&space_info->groups_sem);
8347
8348         if (first) {
8349                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8350                 int ret;
8351
8352                 kobject_get(&space_info->kobj); /* put in release */
8353                 ret = kobject_add(kobj, &space_info->kobj, "%s",
8354                                   get_raid_name(index));
8355                 if (ret) {
8356                         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8357                         kobject_put(&space_info->kobj);
8358                 }
8359         }
8360 }
8361
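/*
 * Allocate and initialize an in-memory block group cache entry covering
 * [start, start + size).  Returns NULL on allocation failure; the
 * on-disk item is filled in by the callers.
 */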
8362 static struct btrfs_block_group_cache *
8363 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8364 {
8365         struct btrfs_block_group_cache *cache;
8366
8367         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8368         if (!cache)
8369                 return NULL;
8370
8371         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8372                                         GFP_NOFS);
8373         if (!cache->free_space_ctl) {
8374                 kfree(cache);
8375                 return NULL;
8376         }
8377
8378         cache->key.objectid = start;
8379         cache->key.offset = size;
8380         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8381
8382         cache->sectorsize = root->sectorsize;
8383         cache->fs_info = root->fs_info;
8384         cache->full_stripe_len = btrfs_full_stripe_len(root,
8385                                                &root->fs_info->mapping_tree,
8386                                                start);
8387         atomic_set(&cache->count, 1);
8388         spin_lock_init(&cache->lock);
8389         INIT_LIST_HEAD(&cache->list);
8390         INIT_LIST_HEAD(&cache->cluster_list);
8391         INIT_LIST_HEAD(&cache->new_bg_list);
8392         btrfs_init_free_space_ctl(cache);
8393
8394         return cache;
8395 }
8396
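/*
 * Read every block group item from the extent tree at mount time, set up
 * the in-memory caches and space_infos, and mark un-mirrored (raid0 or
 * single) groups read-only when mirrored profiles are also present.
 */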
8397 int btrfs_read_block_groups(struct btrfs_root *root)
8398 {
8399         struct btrfs_path *path;
8400         int ret;
8401         struct btrfs_block_group_cache *cache;
8402         struct btrfs_fs_info *info = root->fs_info;
8403         struct btrfs_space_info *space_info;
8404         struct btrfs_key key;
8405         struct btrfs_key found_key;
8406         struct extent_buffer *leaf;
8407         int need_clear = 0;
8408         u64 cache_gen;
8409
8410         root = info->extent_root;
8411         key.objectid = 0;
8412         key.offset = 0;
8413         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8414         path = btrfs_alloc_path();
8415         if (!path)
8416                 return -ENOMEM;
8417         path->reada = 1;
8418
8419         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8420         if (btrfs_test_opt(root, SPACE_CACHE) &&
8421             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8422                 need_clear = 1;
8423         if (btrfs_test_opt(root, CLEAR_CACHE))
8424                 need_clear = 1;
8425
8426         while (1) {
8427                 ret = find_first_block_group(root, path, &key);
8428                 if (ret > 0)
8429                         break;
8430                 if (ret != 0)
8431                         goto error;
8432
8433                 leaf = path->nodes[0];
8434                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8435
8436                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8437                                                        found_key.offset);
8438                 if (!cache) {
8439                         ret = -ENOMEM;
8440                         goto error;
8441                 }
8442
8443                 if (need_clear) {
8444                         /*
8445                          * When we mount with old space cache, we need to
8446                          * set BTRFS_DC_CLEAR and set dirty flag.
8447                          *
8448                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8449                          *    truncate the old free space cache inode and
8450                  *    set up a new one.
8451                          * b) Setting 'dirty flag' makes sure that we flush
8452                          *    the new space cache info onto disk.
8453                          */
8454                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8455                         if (btrfs_test_opt(root, SPACE_CACHE))
8456                                 cache->dirty = 1;
8457                 }
8458
8459                 read_extent_buffer(leaf, &cache->item,
8460                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8461                                    sizeof(cache->item));
8462                 cache->flags = btrfs_block_group_flags(&cache->item);
8463
8464                 key.objectid = found_key.objectid + found_key.offset;
8465                 btrfs_release_path(path);
8466
8467                 /*
8468                  * We need to exclude the super stripes now so that the space
8469                  * info has super bytes accounted for, otherwise we'll think
8470                  * we have more space than we actually do.
8471                  */
8472                 ret = exclude_super_stripes(root, cache);
8473                 if (ret) {
8474                         /*
8475                          * We may have excluded something, so call this just in
8476                          * case.
8477                          */
8478                         free_excluded_extents(root, cache);
8479                         btrfs_put_block_group(cache);
8480                         goto error;
8481                 }
8482
8483                 /*
8484                  * check for two cases: either we are full, and therefore
8485                  * don't need to bother with the caching work since we won't
8486                  * find any space, or we are empty, and we can just add all
8487                  * the space in and be done with it.  This saves us a lot of
8488                  * time, particularly in the full case.
8489                  */
8490                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8491                         cache->last_byte_to_unpin = (u64)-1;
8492                         cache->cached = BTRFS_CACHE_FINISHED;
8493                         free_excluded_extents(root, cache);
8494                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8495                         cache->last_byte_to_unpin = (u64)-1;
8496                         cache->cached = BTRFS_CACHE_FINISHED;
8497                         add_new_free_space(cache, root->fs_info,
8498                                            found_key.objectid,
8499                                            found_key.objectid +
8500                                            found_key.offset);
8501                         free_excluded_extents(root, cache);
8502                 }
8503
8504                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8505                 if (ret) {
8506                         btrfs_remove_free_space_cache(cache);
8507                         btrfs_put_block_group(cache);
8508                         goto error;
8509                 }
8510
8511                 ret = update_space_info(info, cache->flags, found_key.offset,
8512                                         btrfs_block_group_used(&cache->item),
8513                                         &space_info);
8514                 if (ret) {
8515                         btrfs_remove_free_space_cache(cache);
8516                         spin_lock(&info->block_group_cache_lock);
8517                         rb_erase(&cache->cache_node,
8518                                  &info->block_group_cache_tree);
8519                         spin_unlock(&info->block_group_cache_lock);
8520                         btrfs_put_block_group(cache);
8521                         goto error;
8522                 }
8523
8524                 cache->space_info = space_info;
8525                 spin_lock(&cache->space_info->lock);
8526                 cache->space_info->bytes_readonly += cache->bytes_super;
8527                 spin_unlock(&cache->space_info->lock);
8528
8529                 __link_block_group(space_info, cache);
8530
8531                 set_avail_alloc_bits(root->fs_info, cache->flags);
8532                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8533                         set_block_group_ro(cache, 1);
8534         }
8535
8536         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8537                 if (!(get_alloc_profile(root, space_info->flags) &
8538                       (BTRFS_BLOCK_GROUP_RAID10 |
8539                        BTRFS_BLOCK_GROUP_RAID1 |
8540                        BTRFS_BLOCK_GROUP_RAID5 |
8541                        BTRFS_BLOCK_GROUP_RAID6 |
8542                        BTRFS_BLOCK_GROUP_DUP)))
8543                         continue;
8544                 /*
8545                  * avoid allocating from un-mirrored block groups if there are
8546                  * mirrored block groups.
8547                  */
8548                 list_for_each_entry(cache,
8549                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8550                                 list)
8551                         set_block_group_ro(cache, 1);
8552                 list_for_each_entry(cache,
8553                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8554                                 list)
8555                         set_block_group_ro(cache, 1);
8556         }
8557
8558         init_global_block_rsv(info);
8559         ret = 0;
8560 error:
8561         btrfs_free_path(path);
8562         return ret;
8563 }
8564
8565 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8566                                        struct btrfs_root *root)
8567 {
8568         struct btrfs_block_group_cache *block_group, *tmp;
8569         struct btrfs_root *extent_root = root->fs_info->extent_root;
8570         struct btrfs_block_group_item item;
8571         struct btrfs_key key;
8572         int ret = 0;
8573
8574         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8575                                  new_bg_list) {
8576                 list_del_init(&block_group->new_bg_list);
8577
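                /*
                 * Once one insertion has failed and aborted the
                 * transaction, keep draining the list but skip the
                 * on-disk updates for the remaining groups.
                 */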
8578                 if (ret)
8579                         continue;
8580
8581                 spin_lock(&block_group->lock);
8582                 memcpy(&item, &block_group->item, sizeof(item));
8583                 memcpy(&key, &block_group->key, sizeof(key));
8584                 spin_unlock(&block_group->lock);
8585
8586                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8587                                         sizeof(item));
8588                 if (ret)
8589                         btrfs_abort_transaction(trans, extent_root, ret);
8590                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8591                                                key.objectid, key.offset);
8592                 if (ret)
8593                         btrfs_abort_transaction(trans, extent_root, ret);
8594         }
8595 }
8596
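/*
 * Create the in-memory block group for a freshly allocated chunk and
 * queue it on trans->new_bgs; the block group item is inserted into the
 * extent tree later by btrfs_create_pending_block_groups().
 */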
8597 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8598                            struct btrfs_root *root, u64 bytes_used,
8599                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8600                            u64 size)
8601 {
8602         int ret;
8603         struct btrfs_root *extent_root;
8604         struct btrfs_block_group_cache *cache;
8605
8606         extent_root = root->fs_info->extent_root;
8607
8608         root->fs_info->last_trans_log_full_commit = trans->transid;
8609
8610         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
8611         if (!cache)
8612                 return -ENOMEM;
8613
8614         btrfs_set_block_group_used(&cache->item, bytes_used);
8615         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8616         btrfs_set_block_group_flags(&cache->item, type);
8617
8618         cache->flags = type;
8619         cache->last_byte_to_unpin = (u64)-1;
8620         cache->cached = BTRFS_CACHE_FINISHED;
8621         ret = exclude_super_stripes(root, cache);
8622         if (ret) {
8623                 /*
8624                  * We may have excluded something, so call this just in
8625                  * case.
8626                  */
8627                 free_excluded_extents(root, cache);
8628                 btrfs_put_block_group(cache);
8629                 return ret;
8630         }
8631
8632         add_new_free_space(cache, root->fs_info, chunk_offset,
8633                            chunk_offset + size);
8634
8635         free_excluded_extents(root, cache);
8636
8637         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8638         if (ret) {
8639                 btrfs_remove_free_space_cache(cache);
8640                 btrfs_put_block_group(cache);
8641                 return ret;
8642         }
8643
8644         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8645                                 &cache->space_info);
8646         if (ret) {
8647                 btrfs_remove_free_space_cache(cache);
8648                 spin_lock(&root->fs_info->block_group_cache_lock);
8649                 rb_erase(&cache->cache_node,
8650                          &root->fs_info->block_group_cache_tree);
8651                 spin_unlock(&root->fs_info->block_group_cache_lock);
8652                 btrfs_put_block_group(cache);
8653                 return ret;
8654         }
8655         update_global_block_rsv(root->fs_info);
8656
8657         spin_lock(&cache->space_info->lock);
8658         cache->space_info->bytes_readonly += cache->bytes_super;
8659         spin_unlock(&cache->space_info->lock);
8660
8661         __link_block_group(cache->space_info, cache);
8662
8663         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8664
8665         set_avail_alloc_bits(extent_root->fs_info, type);
8666
8667         return 0;
8668 }
8669
8670 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8671 {
8672         u64 extra_flags = chunk_to_extended(flags) &
8673                                 BTRFS_EXTENDED_PROFILE_MASK;
8674
8675         write_seqlock(&fs_info->profiles_lock);
8676         if (flags & BTRFS_BLOCK_GROUP_DATA)
8677                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8678         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8679                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8680         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8681                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8682         write_sequnlock(&fs_info->profiles_lock);
8683 }
8684
8685 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8686                              struct btrfs_root *root, u64 group_start)
8687 {
8688         struct btrfs_path *path;
8689         struct btrfs_block_group_cache *block_group;
8690         struct btrfs_free_cluster *cluster;
8691         struct btrfs_root *tree_root = root->fs_info->tree_root;
8692         struct btrfs_key key;
8693         struct inode *inode;
8694         int ret;
8695         int index;
8696         int factor;
8697
8698         root = root->fs_info->extent_root;
8699
8700         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8701         BUG_ON(!block_group);
8702         BUG_ON(!block_group->ro);
8703
8704         /*
8705          * Free the reserved super bytes from this block group before
8706          * removing it.
8707          */
8708         free_excluded_extents(root, block_group);
8709
8710         memcpy(&key, &block_group->key, sizeof(key));
8711         index = get_block_group_index(block_group);
8712         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8713                                   BTRFS_BLOCK_GROUP_RAID1 |
8714                                   BTRFS_BLOCK_GROUP_RAID10))
8715                 factor = 2;
8716         else
8717                 factor = 1;
8718
8719         /* make sure this block group isn't part of an allocation cluster */
8720         cluster = &root->fs_info->data_alloc_cluster;
8721         spin_lock(&cluster->refill_lock);
8722         btrfs_return_cluster_to_free_space(block_group, cluster);
8723         spin_unlock(&cluster->refill_lock);
8724
8725         /*
8726          * make sure this block group isn't part of a metadata
8727          * allocation cluster
8728          */
8729         cluster = &root->fs_info->meta_alloc_cluster;
8730         spin_lock(&cluster->refill_lock);
8731         btrfs_return_cluster_to_free_space(block_group, cluster);
8732         spin_unlock(&cluster->refill_lock);
8733
8734         path = btrfs_alloc_path();
8735         if (!path) {
8736                 ret = -ENOMEM;
8737                 goto out;
8738         }
8739
8740         inode = lookup_free_space_inode(tree_root, block_group, path);
8741         if (!IS_ERR(inode)) {
8742                 ret = btrfs_orphan_add(trans, inode);
8743                 if (ret) {
8744                         btrfs_add_delayed_iput(inode);
8745                         goto out;
8746                 }
8747                 clear_nlink(inode);
8748                 /* One for the block group's ref */
8749                 spin_lock(&block_group->lock);
8750                 if (block_group->iref) {
8751                         block_group->iref = 0;
8752                         block_group->inode = NULL;
8753                         spin_unlock(&block_group->lock);
8754                         iput(inode);
8755                 } else {
8756                         spin_unlock(&block_group->lock);
8757                 }
8758                 /* One for our lookup ref */
8759                 btrfs_add_delayed_iput(inode);
8760         }
8761
8762         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8763         key.offset = block_group->key.objectid;
8764         key.type = 0;
8765
8766         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8767         if (ret < 0)
8768                 goto out;
8769         if (ret > 0)
8770                 btrfs_release_path(path);
8771         if (ret == 0) {
8772                 ret = btrfs_del_item(trans, tree_root, path);
8773                 if (ret)
8774                         goto out;
8775                 btrfs_release_path(path);
8776         }
8777
8778         spin_lock(&root->fs_info->block_group_cache_lock);
8779         rb_erase(&block_group->cache_node,
8780                  &root->fs_info->block_group_cache_tree);
8781
8782         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8783                 root->fs_info->first_logical_byte = (u64)-1;
8784         spin_unlock(&root->fs_info->block_group_cache_lock);
8785
8786         down_write(&block_group->space_info->groups_sem);
8787         /*
8788          * we must use list_del_init so people can check to see if they
8789          * are still on the list after taking the semaphore
8790          */
8791         list_del_init(&block_group->list);
8792         if (list_empty(&block_group->space_info->block_groups[index])) {
8793                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8794                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8795                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8796         }
8797         up_write(&block_group->space_info->groups_sem);
8798
8799         if (block_group->cached == BTRFS_CACHE_STARTED)
8800                 wait_block_group_cache_done(block_group);
8801
8802         btrfs_remove_free_space_cache(block_group);
8803
8804         spin_lock(&block_group->space_info->lock);
8805         block_group->space_info->total_bytes -= block_group->key.offset;
8806         block_group->space_info->bytes_readonly -= block_group->key.offset;
8807         block_group->space_info->disk_total -= block_group->key.offset * factor;
8808         spin_unlock(&block_group->space_info->lock);
8809
8810         memcpy(&key, &block_group->key, sizeof(key));
8811
8812         btrfs_clear_space_info_full(root->fs_info);
8813
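        /*
         * Drop two references: one for the rbtree entry erased above and
         * one for the lookup reference taken at the start of this
         * function.
         */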
8814         btrfs_put_block_group(block_group);
8815         btrfs_put_block_group(block_group);
8816
8817         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8818         if (ret > 0)
8819                 ret = -EIO;
8820         if (ret < 0)
8821                 goto out;
8822
8823         ret = btrfs_del_item(trans, root, path);
8824 out:
8825         btrfs_free_path(path);
8826         return ret;
8827 }
8828
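/*
 * Create the initial space_info structs, typically for a freshly created
 * filesystem with no block groups yet: system, plus either one mixed
 * metadata+data space_info or separate metadata and data ones, depending
 * on the MIXED_GROUPS feature.
 */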
8829 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8830 {
8831         struct btrfs_space_info *space_info;
8832         struct btrfs_super_block *disk_super;
8833         u64 features;
8834         u64 flags;
8835         int mixed = 0;
8836         int ret;
8837
8838         disk_super = fs_info->super_copy;
8839         if (!btrfs_super_root(disk_super))
8840                 return 1;
8841
8842         features = btrfs_super_incompat_flags(disk_super);
8843         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8844                 mixed = 1;
8845
8846         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8847         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8848         if (ret)
8849                 goto out;
8850
8851         if (mixed) {
8852                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8853                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8854         } else {
8855                 flags = BTRFS_BLOCK_GROUP_METADATA;
8856                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8857                 if (ret)
8858                         goto out;
8859
8860                 flags = BTRFS_BLOCK_GROUP_DATA;
8861                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8862         }
8863 out:
8864         return ret;
8865 }
8866
8867 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8868 {
8869         return unpin_extent_range(root, start, end);
8870 }
8871
8872 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8873                                u64 num_bytes, u64 *actual_bytes)
8874 {
8875         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8876 }
8877
8878 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8879 {
8880         struct btrfs_fs_info *fs_info = root->fs_info;
8881         struct btrfs_block_group_cache *cache = NULL;
8882         u64 group_trimmed;
8883         u64 start;
8884         u64 end;
8885         u64 trimmed = 0;
8886         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8887         int ret = 0;
8888
8889         /*
8890          * try to trim all FS space; our first block group may start at a non-zero offset.
8891          */
8892         if (range->len == total_bytes)
8893                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8894         else
8895                 cache = btrfs_lookup_block_group(fs_info, range->start);
8896
8897         while (cache) {
8898                 if (cache->key.objectid >= (range->start + range->len)) {
8899                         btrfs_put_block_group(cache);
8900                         break;
8901                 }
8902
8903                 start = max(range->start, cache->key.objectid);
8904                 end = min(range->start + range->len,
8905                                 cache->key.objectid + cache->key.offset);
8906
8907                 if (end - start >= range->minlen) {
8908                         if (!block_group_cache_done(cache)) {
8909                                 ret = cache_block_group(cache, 0);
8910                                 if (ret) {
8911                                         btrfs_put_block_group(cache);
8912                                         break;
8913                                 }
8914                                 ret = wait_block_group_cache_done(cache);
8915                                 if (ret) {
8916                                         btrfs_put_block_group(cache);
8917                                         break;
8918                                 }
8919                         }
8920                         ret = btrfs_trim_block_group(cache,
8921                                                      &group_trimmed,
8922                                                      start,
8923                                                      end,
8924                                                      range->minlen);
8925
8926                         trimmed += group_trimmed;
8927                         if (ret) {
8928                                 btrfs_put_block_group(cache);
8929                                 break;
8930                         }
8931                 }
8932
8933                 cache = next_block_group(fs_info->tree_root, cache);
8934         }
8935
8936         range->len = trimmed;
8937         return ret;
8938 }
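/*
 * btrfs_trim_fs() walks every block group overlapping the requested
 * range and discards its free space; range->len is rewritten with the
 * number of bytes actually trimmed.  It is typically reached through
 * the FITRIM ioctl.
 */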
8939
8940 /*
8941  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write();
8942  * they are used to prevent tasks from writing data into the page cache
8943  * via nocow before the subvolume is snapshotted, and to make sure the
8944  * data is flushed to disk after the snapshot is created.
8945  */
8946 void btrfs_end_nocow_write(struct btrfs_root *root)
8947 {
8948         percpu_counter_dec(&root->subv_writers->counter);
8949         /*
8950          * Make sure counter is updated before we wake up
8951          * waiters.
8952          */
8953         smp_mb();
8954         if (waitqueue_active(&root->subv_writers->wait))
8955                 wake_up(&root->subv_writers->wait);
8956 }
8957
8958 int btrfs_start_nocow_write(struct btrfs_root *root)
8959 {
8960         if (unlikely(atomic_read(&root->will_be_snapshoted)))
8961                 return 0;
8962
8963         percpu_counter_inc(&root->subv_writers->counter);
8964         /*
8965          * Make sure counter is updated before we check for snapshot creation.
8966          */
8967         smp_mb();
8968         if (unlikely(atomic_read(&root->will_be_snapshoted))) {
8969                 btrfs_end_nocow_write(root);
8970                 return 0;
8971         }
8972         return 1;
8973 }
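/*
 * A sketch of how these are meant to be paired by nocow writers; a 0
 * return from btrfs_start_nocow_write() means a snapshot is pending and
 * the caller must fall back to the COW path:
 *
 *      if (btrfs_start_nocow_write(root)) {
 *              ... do the nocow write ...
 *              btrfs_end_nocow_write(root);
 *      } else {
 *              ... fall back to COW ...
 *      }
 */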