Btrfs: fix regression when running delayed references
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

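/*
 * Return non-zero once the block group's free space caching has either
 * finished or aborted with an error.
 */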
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

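/* Check whether the block group has all of the given allocation bits set. */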
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

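/*
 * Reference counting helpers for a block group cache entry; the entry is
 * freed once the last reference is dropped.
 */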
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

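/*
 * Mark a range as excluded from the free space caches by tagging it
 * EXTENT_UPTODATE in both freed_extents trees.
 */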
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

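/*
 * Exclude the superblock mirrors that fall inside this block group from free
 * space accounting, clipping each stripe to the block group's range.
 */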
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

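/*
 * Grab a reference to the block group's caching control, or NULL if no
 * caching is in progress.
 */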
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

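/*
 * Worker that walks the extent tree (via the commit root) and populates the
 * block group's free space cache from the gaps between allocated extents.
 */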
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;
        bool wakeup = true;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;
err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

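/*
 * Start caching a block group's free space.  Try to load the on-disk space
 * cache first; if that fails and a full load was requested, queue the
 * caching_thread above to build the cache from the extent tree.
 */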
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info while another thread starts a
         * transaction commit that tries to do an allocation while the first
         * thread is still loading the space cache info.  The previous loop
         * should have kept us from choosing this block group, but if we've
         * moved to the state where we will wait on caching block groups, we
         * need to first check if we're doing a fast load here, so we can wait
         * for it to finish; otherwise we could end up allocating from a block
         * group whose cache gets evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

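/*
 * Find the space_info matching the given allocation flags (data, metadata or
 * system), or NULL if none exists yet.
 */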
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node may
 * also store the extent flags to set.  This way you can check to see what
 * the reference count and extent flags would be if all of the delayed refs
 * were run.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

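/*
 * Hash (root, inode objectid, offset) into the 64-bit value used as the key
 * offset of an EXTENT_DATA_REF item, built from two crc32c passes.
 */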
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

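/* Check whether an extent data ref matches the given root/owner/offset. */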
static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

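/*
 * Find the data back ref for the given extent.  Shared refs are keyed
 * directly by parent; implicit refs hash (root, owner, offset) into the key
 * offset and then walk forward over hash collisions until an exact match is
 * found.
 */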
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

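/*
 * Insert a data back ref for the given extent, or bump the count on an
 * existing one.  For implicit refs, hash collisions are resolved by
 * incrementing the key offset until a free slot or a matching item is found.
 */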
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

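/*
 * Drop refs_to_drop references from the data back ref the path points to,
 * deleting the item (and reporting *last_ref) when the count hits zero.
 */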
1325 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1326                                            struct btrfs_root *root,
1327                                            struct btrfs_path *path,
1328                                            int refs_to_drop, int *last_ref)
1329 {
1330         struct btrfs_key key;
1331         struct btrfs_extent_data_ref *ref1 = NULL;
1332         struct btrfs_shared_data_ref *ref2 = NULL;
1333         struct extent_buffer *leaf;
1334         u32 num_refs = 0;
1335         int ret = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339
1340         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 BUG();
1357         }
1358
1359         BUG_ON(num_refs < refs_to_drop);
1360         num_refs -= refs_to_drop;
1361
1362         if (num_refs == 0) {
1363                 ret = btrfs_del_item(trans, root, path);
1364                 *last_ref = 1;
1365         } else {
1366                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1367                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1368                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1369                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1370 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1371                 else {
1372                         struct btrfs_extent_ref_v0 *ref0;
1373                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1374                                         struct btrfs_extent_ref_v0);
1375                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1376                 }
1377 #endif
1378                 btrfs_mark_buffer_dirty(leaf);
1379         }
1380         return ret;
1381 }
1382
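/*
 * Return the reference count stored in a data extent back ref, either an
 * inline ref (iref != NULL) or a keyed ref item, including the old v0
 * format when compatibility support is compiled in.
 */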
1383 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1384                                           struct btrfs_extent_inline_ref *iref)
1385 {
1386         struct btrfs_key key;
1387         struct extent_buffer *leaf;
1388         struct btrfs_extent_data_ref *ref1;
1389         struct btrfs_shared_data_ref *ref2;
1390         u32 num_refs = 0;
1391
1392         leaf = path->nodes[0];
1393         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1394         if (iref) {
1395                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1396                     BTRFS_EXTENT_DATA_REF_KEY) {
1397                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1398                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1399                 } else {
1400                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1401                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1402                 }
1403         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1404                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1405                                       struct btrfs_extent_data_ref);
1406                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1407         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1408                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1409                                       struct btrfs_shared_data_ref);
1410                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1411 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1412         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1413                 struct btrfs_extent_ref_v0 *ref0;
1414                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1415                                       struct btrfs_extent_ref_v0);
1416                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1417 #endif
1418         } else {
1419                 WARN_ON(1);
1420         }
1421         return num_refs;
1422 }
1423
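/*
 * Look up the keyed back ref item for a tree block: shared blocks are
 * keyed by the parent block's bytenr, non-shared blocks by the objectid
 * of the owning root.  Returns -ENOENT if no matching item exists.
 */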
1424 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1425                                           struct btrfs_root *root,
1426                                           struct btrfs_path *path,
1427                                           u64 bytenr, u64 parent,
1428                                           u64 root_objectid)
1429 {
1430         struct btrfs_key key;
1431         int ret;
1432
1433         key.objectid = bytenr;
1434         if (parent) {
1435                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1436                 key.offset = parent;
1437         } else {
1438                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1439                 key.offset = root_objectid;
1440         }
1441
1442         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1443         if (ret > 0)
1444                 ret = -ENOENT;
1445 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1446         if (ret == -ENOENT && parent) {
1447                 btrfs_release_path(path);
1448                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1449                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1450                 if (ret > 0)
1451                         ret = -ENOENT;
1452         }
1453 #endif
1454         return ret;
1455 }
1456
1457 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1458                                           struct btrfs_root *root,
1459                                           struct btrfs_path *path,
1460                                           u64 bytenr, u64 parent,
1461                                           u64 root_objectid)
1462 {
1463         struct btrfs_key key;
1464         int ret;
1465
1466         key.objectid = bytenr;
1467         if (parent) {
1468                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1469                 key.offset = parent;
1470         } else {
1471                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1472                 key.offset = root_objectid;
1473         }
1474
1475         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1476         btrfs_release_path(path);
1477         return ret;
1478 }
1479
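/*
 * Map (parent, owner) to the matching back ref key type:
 *
 *      tree block (owner < BTRFS_FIRST_FREE_OBJECTID):
 *              parent set   -> BTRFS_SHARED_BLOCK_REF_KEY
 *              parent unset -> BTRFS_TREE_BLOCK_REF_KEY
 *      data extent:
 *              parent set   -> BTRFS_SHARED_DATA_REF_KEY
 *              parent unset -> BTRFS_EXTENT_DATA_REF_KEY
 */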
1480 static inline int extent_ref_type(u64 parent, u64 owner)
1481 {
1482         int type;
1483         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1484                 if (parent > 0)
1485                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1486                 else
1487                         type = BTRFS_TREE_BLOCK_REF_KEY;
1488         } else {
1489                 if (parent > 0)
1490                         type = BTRFS_SHARED_DATA_REF_KEY;
1491                 else
1492                         type = BTRFS_EXTENT_DATA_REF_KEY;
1493         }
1494         return type;
1495 }
1496
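/*
 * Starting at the given level, walk up the path and return the key that
 * follows the current slot without moving the path.  Returns 1 if there
 * is no next key.
 */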
1497 static int find_next_key(struct btrfs_path *path, int level,
1498                          struct btrfs_key *key)
1499
1500 {
1501         for (; level < BTRFS_MAX_LEVEL; level++) {
1502                 if (!path->nodes[level])
1503                         break;
1504                 if (path->slots[level] + 1 >=
1505                     btrfs_header_nritems(path->nodes[level]))
1506                         continue;
1507                 if (level == 0)
1508                         btrfs_item_key_to_cpu(path->nodes[level], key,
1509                                               path->slots[level] + 1);
1510                 else
1511                         btrfs_node_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 return 0;
1514         }
1515         return 1;
1516 }
1517
1518 /*
1519  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1520  * to the address of the inline back ref and 0 is returned.
1521  *
1522  * If the back ref isn't found, *ref_ret is set to the address where it
1523  * should be inserted and -ENOENT is returned.
1524  *
1525  * If insert is true and there are too many inline back refs, the path
1526  * points to the extent item and -EAGAIN is returned.
1527  *
1528  * NOTE: inline back refs are ordered the same way that back ref
1529  *       items in the tree are ordered.
1530  */
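/*
 * A typical caller looks roughly like this (see
 * insert_inline_extent_backref() below for the real thing):
 *
 *      ret = lookup_inline_extent_backref(..., &iref, ..., 1);
 *      if (ret == 0)
 *              update_inline_extent_backref(...);  (found: bump the count)
 *      else if (ret == -ENOENT)
 *              setup_inline_extent_backref(...);   (insert at *iref)
 *      (on -EAGAIN, fall back to a keyed back ref item)
 */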
1531 static noinline_for_stack
1532 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1533                                  struct btrfs_root *root,
1534                                  struct btrfs_path *path,
1535                                  struct btrfs_extent_inline_ref **ref_ret,
1536                                  u64 bytenr, u64 num_bytes,
1537                                  u64 parent, u64 root_objectid,
1538                                  u64 owner, u64 offset, int insert)
1539 {
1540         struct btrfs_key key;
1541         struct extent_buffer *leaf;
1542         struct btrfs_extent_item *ei;
1543         struct btrfs_extent_inline_ref *iref;
1544         u64 flags;
1545         u64 item_size;
1546         unsigned long ptr;
1547         unsigned long end;
1548         int extra_size;
1549         int type;
1550         int want;
1551         int ret;
1552         int err = 0;
1553         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1554                                                  SKINNY_METADATA);
1555
1556         key.objectid = bytenr;
1557         key.type = BTRFS_EXTENT_ITEM_KEY;
1558         key.offset = num_bytes;
1559
1560         want = extent_ref_type(parent, owner);
1561         if (insert) {
1562                 extra_size = btrfs_extent_inline_ref_size(want);
1563                 path->keep_locks = 1;
1564         } else
1565                 extra_size = -1;
1566
1567         /*
1568          * Owner is our parent level, so we can just add one to get the level
1569          * for the block we are interested in.
1570          */
1571         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1572                 key.type = BTRFS_METADATA_ITEM_KEY;
1573                 key.offset = owner;
1574         }
1575
1576 again:
1577         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1578         if (ret < 0) {
1579                 err = ret;
1580                 goto out;
1581         }
1582
1583         /*
1584          * We may be a newly converted file system which still has the old fat
1585          * extent entries for metadata, so try to see if we have one of those.
1586          */
1587         if (ret > 0 && skinny_metadata) {
1588                 skinny_metadata = false;
1589                 if (path->slots[0]) {
1590                         path->slots[0]--;
1591                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1592                                               path->slots[0]);
1593                         if (key.objectid == bytenr &&
1594                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1595                             key.offset == num_bytes)
1596                                 ret = 0;
1597                 }
1598                 if (ret) {
1599                         key.objectid = bytenr;
1600                         key.type = BTRFS_EXTENT_ITEM_KEY;
1601                         key.offset = num_bytes;
1602                         btrfs_release_path(path);
1603                         goto again;
1604                 }
1605         }
1606
1607         if (ret && !insert) {
1608                 err = -ENOENT;
1609                 goto out;
1610         } else if (WARN_ON(ret)) {
1611                 err = -EIO;
1612                 goto out;
1613         }
1614
1615         leaf = path->nodes[0];
1616         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1617 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1618         if (item_size < sizeof(*ei)) {
1619                 if (!insert) {
1620                         err = -ENOENT;
1621                         goto out;
1622                 }
1623                 ret = convert_extent_item_v0(trans, root, path, owner,
1624                                              extra_size);
1625                 if (ret < 0) {
1626                         err = ret;
1627                         goto out;
1628                 }
1629                 leaf = path->nodes[0];
1630                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1631         }
1632 #endif
1633         BUG_ON(item_size < sizeof(*ei));
1634
1635         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1636         flags = btrfs_extent_flags(leaf, ei);
1637
1638         ptr = (unsigned long)(ei + 1);
1639         end = (unsigned long)ei + item_size;
1640
1641         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1642                 ptr += sizeof(struct btrfs_tree_block_info);
1643                 BUG_ON(ptr > end);
1644         }
1645
1646         err = -ENOENT;
1647         while (1) {
1648                 if (ptr >= end) {
1649                         WARN_ON(ptr > end);
1650                         break;
1651                 }
1652                 iref = (struct btrfs_extent_inline_ref *)ptr;
1653                 type = btrfs_extent_inline_ref_type(leaf, iref);
1654                 if (want < type)
1655                         break;
1656                 if (want > type) {
1657                         ptr += btrfs_extent_inline_ref_size(type);
1658                         continue;
1659                 }
1660
1661                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1662                         struct btrfs_extent_data_ref *dref;
1663                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1664                         if (match_extent_data_ref(leaf, dref, root_objectid,
1665                                                   owner, offset)) {
1666                                 err = 0;
1667                                 break;
1668                         }
1669                         if (hash_extent_data_ref_item(leaf, dref) <
1670                             hash_extent_data_ref(root_objectid, owner, offset))
1671                                 break;
1672                 } else {
1673                         u64 ref_offset;
1674                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1675                         if (parent > 0) {
1676                                 if (parent == ref_offset) {
1677                                         err = 0;
1678                                         break;
1679                                 }
1680                                 if (ref_offset < parent)
1681                                         break;
1682                         } else {
1683                                 if (root_objectid == ref_offset) {
1684                                         err = 0;
1685                                         break;
1686                                 }
1687                                 if (ref_offset < root_objectid)
1688                                         break;
1689                         }
1690                 }
1691                 ptr += btrfs_extent_inline_ref_size(type);
1692         }
1693         if (err == -ENOENT && insert) {
1694                 if (item_size + extra_size >=
1695                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1696                         err = -EAGAIN;
1697                         goto out;
1698                 }
1699                 /*
1700                  * To add a new inline back ref, we have to make sure
1701                  * there is no corresponding back ref item.
1702                  * For simplicity, we just do not add a new inline back
1703                  * ref if there is any kind of item for this block.
1704                  */
1705                 if (find_next_key(path, 0, &key) == 0 &&
1706                     key.objectid == bytenr &&
1707                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1708                         err = -EAGAIN;
1709                         goto out;
1710                 }
1711         }
1712         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1713 out:
1714         if (insert) {
1715                 path->keep_locks = 0;
1716                 btrfs_unlock_up_safe(path, 1);
1717         }
1718         return err;
1719 }
1720
1721 /*
1722  * helper to add a new inline back ref
1723  */
1724 static noinline_for_stack
1725 void setup_inline_extent_backref(struct btrfs_root *root,
1726                                  struct btrfs_path *path,
1727                                  struct btrfs_extent_inline_ref *iref,
1728                                  u64 parent, u64 root_objectid,
1729                                  u64 owner, u64 offset, int refs_to_add,
1730                                  struct btrfs_delayed_extent_op *extent_op)
1731 {
1732         struct extent_buffer *leaf;
1733         struct btrfs_extent_item *ei;
1734         unsigned long ptr;
1735         unsigned long end;
1736         unsigned long item_offset;
1737         u64 refs;
1738         int size;
1739         int type;
1740
1741         leaf = path->nodes[0];
1742         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1743         item_offset = (unsigned long)iref - (unsigned long)ei;
1744
1745         type = extent_ref_type(parent, owner);
1746         size = btrfs_extent_inline_ref_size(type);
1747
1748         btrfs_extend_item(root, path, size);
1749
1750         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1751         refs = btrfs_extent_refs(leaf, ei);
1752         refs += refs_to_add;
1753         btrfs_set_extent_refs(leaf, ei, refs);
1754         if (extent_op)
1755                 __run_delayed_extent_op(extent_op, leaf, ei);
1756
1757         ptr = (unsigned long)ei + item_offset;
1758         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1759         if (ptr < end - size)
1760                 memmove_extent_buffer(leaf, ptr + size, ptr,
1761                                       end - size - ptr);
1762
1763         iref = (struct btrfs_extent_inline_ref *)ptr;
1764         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1765         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1766                 struct btrfs_extent_data_ref *dref;
1767                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1768                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1769                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1770                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1771                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1772         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1773                 struct btrfs_shared_data_ref *sref;
1774                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1775                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1776                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1777         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1778                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1779         } else {
1780                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1781         }
1782         btrfs_mark_buffer_dirty(leaf);
1783 }
1784
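/*
 * Find a back ref for the given extent: try the inline form first and
 * fall back to the keyed back ref items (tree block or data refs) when
 * no inline ref exists.
 */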
1785 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1786                                  struct btrfs_root *root,
1787                                  struct btrfs_path *path,
1788                                  struct btrfs_extent_inline_ref **ref_ret,
1789                                  u64 bytenr, u64 num_bytes, u64 parent,
1790                                  u64 root_objectid, u64 owner, u64 offset)
1791 {
1792         int ret;
1793
1794         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1795                                            bytenr, num_bytes, parent,
1796                                            root_objectid, owner, offset, 0);
1797         if (ret != -ENOENT)
1798                 return ret;
1799
1800         btrfs_release_path(path);
1801         *ref_ret = NULL;
1802
1803         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1804                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1805                                             root_objectid);
1806         } else {
1807                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1808                                              root_objectid, owner, offset);
1809         }
1810         return ret;
1811 }
1812
1813 /*
1814  * helper to update/remove inline back ref
1815  */
1816 static noinline_for_stack
1817 void update_inline_extent_backref(struct btrfs_root *root,
1818                                   struct btrfs_path *path,
1819                                   struct btrfs_extent_inline_ref *iref,
1820                                   int refs_to_mod,
1821                                   struct btrfs_delayed_extent_op *extent_op,
1822                                   int *last_ref)
1823 {
1824         struct extent_buffer *leaf;
1825         struct btrfs_extent_item *ei;
1826         struct btrfs_extent_data_ref *dref = NULL;
1827         struct btrfs_shared_data_ref *sref = NULL;
1828         unsigned long ptr;
1829         unsigned long end;
1830         u32 item_size;
1831         int size;
1832         int type;
1833         u64 refs;
1834
1835         leaf = path->nodes[0];
1836         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1837         refs = btrfs_extent_refs(leaf, ei);
1838         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1839         refs += refs_to_mod;
1840         btrfs_set_extent_refs(leaf, ei, refs);
1841         if (extent_op)
1842                 __run_delayed_extent_op(extent_op, leaf, ei);
1843
1844         type = btrfs_extent_inline_ref_type(leaf, iref);
1845
1846         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1847                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1848                 refs = btrfs_extent_data_ref_count(leaf, dref);
1849         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1850                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1851                 refs = btrfs_shared_data_ref_count(leaf, sref);
1852         } else {
1853                 refs = 1;
1854                 BUG_ON(refs_to_mod != -1);
1855         }
1856
1857         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1858         refs += refs_to_mod;
1859
1860         if (refs > 0) {
1861                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1862                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1863                 else
1864                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1865         } else {
1866                 *last_ref = 1;
1867                 size =  btrfs_extent_inline_ref_size(type);
1868                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1869                 ptr = (unsigned long)iref;
1870                 end = (unsigned long)ei + item_size;
1871                 if (ptr + size < end)
1872                         memmove_extent_buffer(leaf, ptr, ptr + size,
1873                                               end - ptr - size);
1874                 item_size -= size;
1875                 btrfs_truncate_item(root, path, item_size, 1);
1876         }
1877         btrfs_mark_buffer_dirty(leaf);
1878 }
1879
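/*
 * Insert a new inline back ref or bump the count of an existing one.
 * Returns -EAGAIN when the extent item has no room left for another
 * inline ref; the caller must then fall back to a keyed back ref item.
 */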
1880 static noinline_for_stack
1881 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1882                                  struct btrfs_root *root,
1883                                  struct btrfs_path *path,
1884                                  u64 bytenr, u64 num_bytes, u64 parent,
1885                                  u64 root_objectid, u64 owner,
1886                                  u64 offset, int refs_to_add,
1887                                  struct btrfs_delayed_extent_op *extent_op)
1888 {
1889         struct btrfs_extent_inline_ref *iref;
1890         int ret;
1891
1892         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1893                                            bytenr, num_bytes, parent,
1894                                            root_objectid, owner, offset, 1);
1895         if (ret == 0) {
1896                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1897                 update_inline_extent_backref(root, path, iref,
1898                                              refs_to_add, extent_op, NULL);
1899         } else if (ret == -ENOENT) {
1900                 setup_inline_extent_backref(root, path, iref, parent,
1901                                             root_objectid, owner, offset,
1902                                             refs_to_add, extent_op);
1903                 ret = 0;
1904         }
1905         return ret;
1906 }
1907
1908 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1909                                  struct btrfs_root *root,
1910                                  struct btrfs_path *path,
1911                                  u64 bytenr, u64 parent, u64 root_objectid,
1912                                  u64 owner, u64 offset, int refs_to_add)
1913 {
1914         int ret;
1915         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1916                 BUG_ON(refs_to_add != 1);
1917                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1918                                             parent, root_objectid);
1919         } else {
1920                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1921                                              parent, root_objectid,
1922                                              owner, offset, refs_to_add);
1923         }
1924         return ret;
1925 }
1926
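/*
 * Drop refs_to_drop references from whichever back ref the path points
 * at: an inline ref, a keyed data ref item, or a keyed tree block ref
 * item (simply deleted, as tree block refs always have a count of 1).
 */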
1927 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1928                                  struct btrfs_root *root,
1929                                  struct btrfs_path *path,
1930                                  struct btrfs_extent_inline_ref *iref,
1931                                  int refs_to_drop, int is_data, int *last_ref)
1932 {
1933         int ret = 0;
1934
1935         BUG_ON(!is_data && refs_to_drop != 1);
1936         if (iref) {
1937                 update_inline_extent_backref(root, path, iref,
1938                                              -refs_to_drop, NULL, last_ref);
1939         } else if (is_data) {
1940                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1941                                              last_ref);
1942         } else {
1943                 *last_ref = 1;
1944                 ret = btrfs_del_item(trans, root, path);
1945         }
1946         return ret;
1947 }
1948
1949 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
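/*
 * Issue a discard for [start, start + len), first trimming the range to
 * 512-byte (1 << 9) sector alignment and then skipping any superblock
 * mirrors that fall inside it.  The number of bytes actually discarded
 * comes back via *discarded_bytes.
 */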
1950 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1951                                u64 *discarded_bytes)
1952 {
1953         int j, ret = 0;
1954         u64 bytes_left, end;
1955         u64 aligned_start = ALIGN(start, 1 << 9);
1956
1957         if (WARN_ON(start != aligned_start)) {
1958                 len -= aligned_start - start;
1959                 len = round_down(len, 1 << 9);
1960                 start = aligned_start;
1961         }
1962
1963         *discarded_bytes = 0;
1964
1965         if (!len)
1966                 return 0;
1967
1968         end = start + len;
1969         bytes_left = len;
1970
1971         /* Skip any superblocks on this device. */
1972         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1973                 u64 sb_start = btrfs_sb_offset(j);
1974                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1975                 u64 size = sb_start - start;
1976
1977                 if (!in_range(sb_start, start, bytes_left) &&
1978                     !in_range(sb_end, start, bytes_left) &&
1979                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1980                         continue;
1981
1982                 /*
1983                  * Superblock spans beginning of range.  Adjust start and
1984                  * try again.
1985                  */
1986                 if (sb_start <= start) {
1987                         start += sb_end - start;
1988                         if (start > end) {
1989                                 bytes_left = 0;
1990                                 break;
1991                         }
1992                         bytes_left = end - start;
1993                         continue;
1994                 }
1995
1996                 if (size) {
1997                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1998                                                    GFP_NOFS, 0);
1999                         if (!ret)
2000                                 *discarded_bytes += size;
2001                         else if (ret != -EOPNOTSUPP)
2002                                 return ret;
2003                 }
2004
2005                 start = sb_end;
2006                 if (start > end) {
2007                         bytes_left = 0;
2008                         break;
2009                 }
2010                 bytes_left = end - start;
2011         }
2012
2013         if (bytes_left) {
2014                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2015                                            GFP_NOFS, 0);
2016                 if (!ret)
2017                         *discarded_bytes += bytes_left;
2018         }
2019         return ret;
2020 }
2021
2022 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2023                          u64 num_bytes, u64 *actual_bytes)
2024 {
2025         int ret;
2026         u64 discarded_bytes = 0;
2027         struct btrfs_bio *bbio = NULL;
2028
2029
2030         /* Tell the block device(s) that the sectors can be discarded */
2031         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2032                               bytenr, &num_bytes, &bbio, 0);
2033         /* Error condition is -ENOMEM */
2034         if (!ret) {
2035                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2036                 int i;
2037
2038
2039                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2040                         u64 bytes;
2041                         if (!stripe->dev->can_discard)
2042                                 continue;
2043
2044                         ret = btrfs_issue_discard(stripe->dev->bdev,
2045                                                   stripe->physical,
2046                                                   stripe->length,
2047                                                   &bytes);
2048                         if (!ret)
2049                                 discarded_bytes += bytes;
2050                         else if (ret != -EOPNOTSUPP)
2051                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2052
2053                         /*
2054                          * If we get back EOPNOTSUPP for some reason,
2055                          * just ignore the return value so we don't screw up
2056                          * people calling discard_extent.
2057                          */
2058                         ret = 0;
2059                 }
2060                 btrfs_put_bbio(bbio);
2061         }
2062
2063         if (actual_bytes)
2064                 *actual_bytes = discarded_bytes;
2065
2066
2067         if (ret == -EOPNOTSUPP)
2068                 ret = 0;
2069         return ret;
2070 }
2071
2072 /* Can return -ENOMEM */
2073 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2074                          struct btrfs_root *root,
2075                          u64 bytenr, u64 num_bytes, u64 parent,
2076                          u64 root_objectid, u64 owner, u64 offset,
2077                          int no_quota)
2078 {
2079         int ret;
2080         struct btrfs_fs_info *fs_info = root->fs_info;
2081
2082         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2083                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2084
2085         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2086                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2087                                         num_bytes,
2088                                         parent, root_objectid, (int)owner,
2089                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2090         } else {
2091                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2092                                         num_bytes,
2093                                         parent, root_objectid, owner, offset,
2094                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2095         }
2096         return ret;
2097 }
2098
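/*
 * Add refs_to_add references to an existing extent: try the inline back
 * ref first; if the extent item has no room left (-EAGAIN), bump the ref
 * count on the extent item and insert a keyed back ref item instead.
 */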
2099 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2100                                   struct btrfs_root *root,
2101                                   struct btrfs_delayed_ref_node *node,
2102                                   u64 parent, u64 root_objectid,
2103                                   u64 owner, u64 offset, int refs_to_add,
2104                                   struct btrfs_delayed_extent_op *extent_op)
2105 {
2106         struct btrfs_fs_info *fs_info = root->fs_info;
2107         struct btrfs_path *path;
2108         struct extent_buffer *leaf;
2109         struct btrfs_extent_item *item;
2110         struct btrfs_key key;
2111         u64 bytenr = node->bytenr;
2112         u64 num_bytes = node->num_bytes;
2113         u64 refs;
2114         int ret;
2115         int no_quota = node->no_quota;
2116
2117         path = btrfs_alloc_path();
2118         if (!path)
2119                 return -ENOMEM;
2120
2121         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2122                 no_quota = 1;
2123
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         /* this will set up the path even if it fails to insert the back ref */
2127         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2128                                            bytenr, num_bytes, parent,
2129                                            root_objectid, owner, offset,
2130                                            refs_to_add, extent_op);
2131         if ((ret < 0 && ret != -EAGAIN) || !ret)
2132                 goto out;
2133
2134         /*
2135          * Ok we had -EAGAIN which means we didn't have space to insert an
2136          * inline extent ref, so just update the reference count and add a
2137          * normal backref.
2138          */
2139         leaf = path->nodes[0];
2140         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2141         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2142         refs = btrfs_extent_refs(leaf, item);
2143         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2144         if (extent_op)
2145                 __run_delayed_extent_op(extent_op, leaf, item);
2146
2147         btrfs_mark_buffer_dirty(leaf);
2148         btrfs_release_path(path);
2149
2150         path->reada = 1;
2151         path->leave_spinning = 1;
2152         /* now insert the actual backref */
2153         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2154                                     path, bytenr, parent, root_objectid,
2155                                     owner, offset, refs_to_add);
2156         if (ret)
2157                 btrfs_abort_transaction(trans, root, ret);
2158 out:
2159         btrfs_free_path(path);
2160         return ret;
2161 }
2162
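/*
 * Run a single delayed data ref: allocate the reserved extent if this is
 * the initial ref of a newly allocated extent, otherwise add or drop a
 * reference on the existing extent item.
 */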
2163 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2164                                 struct btrfs_root *root,
2165                                 struct btrfs_delayed_ref_node *node,
2166                                 struct btrfs_delayed_extent_op *extent_op,
2167                                 int insert_reserved)
2168 {
2169         int ret = 0;
2170         struct btrfs_delayed_data_ref *ref;
2171         struct btrfs_key ins;
2172         u64 parent = 0;
2173         u64 ref_root = 0;
2174         u64 flags = 0;
2175
2176         ins.objectid = node->bytenr;
2177         ins.offset = node->num_bytes;
2178         ins.type = BTRFS_EXTENT_ITEM_KEY;
2179
2180         ref = btrfs_delayed_node_to_data_ref(node);
2181         trace_run_delayed_data_ref(node, ref, node->action);
2182
2183         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2184                 parent = ref->parent;
2185         ref_root = ref->root;
2186
2187         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2188                 if (extent_op)
2189                         flags |= extent_op->flags_to_set;
2190                 ret = alloc_reserved_file_extent(trans, root,
2191                                                  parent, ref_root, flags,
2192                                                  ref->objectid, ref->offset,
2193                                                  &ins, node->ref_mod);
2194         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2195                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2196                                              ref_root, ref->objectid,
2197                                              ref->offset, node->ref_mod,
2198                                              extent_op);
2199         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2200                 ret = __btrfs_free_extent(trans, root, node, parent,
2201                                           ref_root, ref->objectid,
2202                                           ref->offset, node->ref_mod,
2203                                           extent_op);
2204         } else {
2205                 BUG();
2206         }
2207         return ret;
2208 }
2209
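/*
 * Apply a delayed extent op to an extent item in place: OR in any new
 * flags and, when the op carries a key update, write the new key into
 * the btrfs_tree_block_info that follows the extent item (tree blocks
 * only).
 */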
2210 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2211                                     struct extent_buffer *leaf,
2212                                     struct btrfs_extent_item *ei)
2213 {
2214         u64 flags = btrfs_extent_flags(leaf, ei);
2215         if (extent_op->update_flags) {
2216                 flags |= extent_op->flags_to_set;
2217                 btrfs_set_extent_flags(leaf, ei, flags);
2218         }
2219
2220         if (extent_op->update_key) {
2221                 struct btrfs_tree_block_info *bi;
2222                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2223                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2224                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2225         }
2226 }
2227
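/*
 * Look up the extent item a delayed extent op refers to and apply the
 * op.  For metadata we search for the skinny METADATA_ITEM_KEY first
 * and fall back to the old fat EXTENT_ITEM_KEY if it isn't found.
 */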
2228 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2229                                  struct btrfs_root *root,
2230                                  struct btrfs_delayed_ref_node *node,
2231                                  struct btrfs_delayed_extent_op *extent_op)
2232 {
2233         struct btrfs_key key;
2234         struct btrfs_path *path;
2235         struct btrfs_extent_item *ei;
2236         struct extent_buffer *leaf;
2237         u32 item_size;
2238         int ret;
2239         int err = 0;
2240         int metadata = !extent_op->is_data;
2241
2242         if (trans->aborted)
2243                 return 0;
2244
2245         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2246                 metadata = 0;
2247
2248         path = btrfs_alloc_path();
2249         if (!path)
2250                 return -ENOMEM;
2251
2252         key.objectid = node->bytenr;
2253
2254         if (metadata) {
2255                 key.type = BTRFS_METADATA_ITEM_KEY;
2256                 key.offset = extent_op->level;
2257         } else {
2258                 key.type = BTRFS_EXTENT_ITEM_KEY;
2259                 key.offset = node->num_bytes;
2260         }
2261
2262 again:
2263         path->reada = 1;
2264         path->leave_spinning = 1;
2265         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2266                                 path, 0, 1);
2267         if (ret < 0) {
2268                 err = ret;
2269                 goto out;
2270         }
2271         if (ret > 0) {
2272                 if (metadata) {
2273                         if (path->slots[0] > 0) {
2274                                 path->slots[0]--;
2275                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2276                                                       path->slots[0]);
2277                                 if (key.objectid == node->bytenr &&
2278                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2279                                     key.offset == node->num_bytes)
2280                                         ret = 0;
2281                         }
2282                         if (ret > 0) {
2283                                 btrfs_release_path(path);
2284                                 metadata = 0;
2285
2286                                 key.objectid = node->bytenr;
2287                                 key.offset = node->num_bytes;
2288                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2289                                 goto again;
2290                         }
2291                 } else {
2292                         err = -EIO;
2293                         goto out;
2294                 }
2295         }
2296
2297         leaf = path->nodes[0];
2298         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2299 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2300         if (item_size < sizeof(*ei)) {
2301                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2302                                              path, (u64)-1, 0);
2303                 if (ret < 0) {
2304                         err = ret;
2305                         goto out;
2306                 }
2307                 leaf = path->nodes[0];
2308                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309         }
2310 #endif
2311         BUG_ON(item_size < sizeof(*ei));
2312         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2313         __run_delayed_extent_op(extent_op, leaf, ei);
2314
2315         btrfs_mark_buffer_dirty(leaf);
2316 out:
2317         btrfs_free_path(path);
2318         return err;
2319 }
2320
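/*
 * Run a single delayed tree block ref, the metadata counterpart of
 * run_delayed_data_ref().  Tree blocks always carry exactly one ref
 * per delayed ref node (ref_mod == 1).
 */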
2321 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2322                                 struct btrfs_root *root,
2323                                 struct btrfs_delayed_ref_node *node,
2324                                 struct btrfs_delayed_extent_op *extent_op,
2325                                 int insert_reserved)
2326 {
2327         int ret = 0;
2328         struct btrfs_delayed_tree_ref *ref;
2329         struct btrfs_key ins;
2330         u64 parent = 0;
2331         u64 ref_root = 0;
2332         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2333                                                  SKINNY_METADATA);
2334
2335         ref = btrfs_delayed_node_to_tree_ref(node);
2336         trace_run_delayed_tree_ref(node, ref, node->action);
2337
2338         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2339                 parent = ref->parent;
2340         ref_root = ref->root;
2341
2342         ins.objectid = node->bytenr;
2343         if (skinny_metadata) {
2344                 ins.offset = ref->level;
2345                 ins.type = BTRFS_METADATA_ITEM_KEY;
2346         } else {
2347                 ins.offset = node->num_bytes;
2348                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2349         }
2350
2351         BUG_ON(node->ref_mod != 1);
2352         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2353                 BUG_ON(!extent_op || !extent_op->update_flags);
2354                 ret = alloc_reserved_tree_block(trans, root,
2355                                                 parent, ref_root,
2356                                                 extent_op->flags_to_set,
2357                                                 &extent_op->key,
2358                                                 ref->level, &ins,
2359                                                 node->no_quota);
2360         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2361                 ret = __btrfs_inc_extent_ref(trans, root, node,
2362                                              parent, ref_root,
2363                                              ref->level, 0, 1,
2364                                              extent_op);
2365         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2366                 ret = __btrfs_free_extent(trans, root, node,
2367                                           parent, ref_root,
2368                                           ref->level, 0, 1, extent_op);
2369         } else {
2370                 BUG();
2371         }
2372         return ret;
2373 }
2374
2375 /* helper function to actually process a single delayed ref entry */
2376 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2377                                struct btrfs_root *root,
2378                                struct btrfs_delayed_ref_node *node,
2379                                struct btrfs_delayed_extent_op *extent_op,
2380                                int insert_reserved)
2381 {
2382         int ret = 0;
2383
2384         if (trans->aborted) {
2385                 if (insert_reserved)
2386                         btrfs_pin_extent(root, node->bytenr,
2387                                          node->num_bytes, 1);
2388                 return 0;
2389         }
2390
2391         if (btrfs_delayed_ref_is_head(node)) {
2392                 struct btrfs_delayed_ref_head *head;
2393                 /*
2394                  * we've hit the end of the chain and we were supposed
2395                  * to insert this extent into the tree.  But it got
2396                  * deleted before we ever needed to insert it, so all
2397                  * we have to do is clean up the accounting.
2398                  */
2399                 BUG_ON(extent_op);
2400                 head = btrfs_delayed_node_to_head(node);
2401                 trace_run_delayed_ref_head(node, head, node->action);
2402
2403                 if (insert_reserved) {
2404                         btrfs_pin_extent(root, node->bytenr,
2405                                          node->num_bytes, 1);
2406                         if (head->is_data) {
2407                                 ret = btrfs_del_csums(trans, root,
2408                                                       node->bytenr,
2409                                                       node->num_bytes);
2410                         }
2411                 }
2412
2413                 /* Also free its reserved qgroup space */
2414                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2415                                               head->qgroup_ref_root,
2416                                               head->qgroup_reserved);
2417                 return ret;
2418         }
2419
2420         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2421             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2422                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2423                                            insert_reserved);
2424         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2425                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2426                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2427                                            insert_reserved);
2428         else
2429                 BUG();
2430         return ret;
2431 }
2432
2433 static inline struct btrfs_delayed_ref_node *
2434 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2435 {
2436         struct btrfs_delayed_ref_node *ref;
2437
2438         if (list_empty(&head->ref_list))
2439                 return NULL;
2440
2441         /*
2442          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2443          * This prevents a ref count from dropping to zero, which would delete
2444          * the extent item from the extent tree while there still are references
2445          * to add; those adds would then fail to find the extent item.
2446          */
2447         list_for_each_entry(ref, &head->ref_list, list) {
2448                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2449                         return ref;
2450         }
2451
2452         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2453                           list);
2454 }
2455
2456 /*
2457  * Returns 0 on success or if called with an already aborted transaction.
2458  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2459  */
2460 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2461                                              struct btrfs_root *root,
2462                                              unsigned long nr)
2463 {
2464         struct btrfs_delayed_ref_root *delayed_refs;
2465         struct btrfs_delayed_ref_node *ref;
2466         struct btrfs_delayed_ref_head *locked_ref = NULL;
2467         struct btrfs_delayed_extent_op *extent_op;
2468         struct btrfs_fs_info *fs_info = root->fs_info;
2469         ktime_t start = ktime_get();
2470         int ret;
2471         unsigned long count = 0;
2472         unsigned long actual_count = 0;
2473         int must_insert_reserved = 0;
2474
2475         delayed_refs = &trans->transaction->delayed_refs;
2476         while (1) {
2477                 if (!locked_ref) {
2478                         if (count >= nr)
2479                                 break;
2480
2481                         spin_lock(&delayed_refs->lock);
2482                         locked_ref = btrfs_select_ref_head(trans);
2483                         if (!locked_ref) {
2484                                 spin_unlock(&delayed_refs->lock);
2485                                 break;
2486                         }
2487
2488                         /* grab the lock that says we are going to process
2489                          * all the refs for this head */
2490                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2491                         spin_unlock(&delayed_refs->lock);
2492                         /*
2493                          * we may have dropped the spin lock to get the head
2494                          * mutex lock, and that might have given someone else
2495                          * time to free the head.  If that's true, it has been
2496                          * removed from our list and we can move on.
2497                          */
2498                         if (ret == -EAGAIN) {
2499                                 locked_ref = NULL;
2500                                 count++;
2501                                 continue;
2502                         }
2503                 }
2504
2505                 /*
2506                  * We need to try to merge add/drops of the same ref since we
2507                  * can run into issues with relocate dropping the implicit ref
2508                  * and then it being added back again before the drop can
2509                  * finish.  If we merged anything we need to re-loop so we can
2510                  * get a good ref.
2511                  * Or we can get node references of the same type that weren't
2512                  * merged when created due to bumps in the tree mod seq, and
2513                  * we need to merge them to prevent adding an inline extent
2514                  * backref before dropping it (triggering a BUG_ON at
2515                  * insert_inline_extent_backref()).
2516                  */
2517                 spin_lock(&locked_ref->lock);
2518                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2519                                          locked_ref);
2520
2521                 /*
2522                  * locked_ref is the head node, so we have to go one
2523                  * node back for any delayed ref updates
2524                  */
2525                 ref = select_delayed_ref(locked_ref);
2526
2527                 if (ref && ref->seq &&
2528                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2529                         spin_unlock(&locked_ref->lock);
2530                         btrfs_delayed_ref_unlock(locked_ref);
2531                         spin_lock(&delayed_refs->lock);
2532                         locked_ref->processing = 0;
2533                         delayed_refs->num_heads_ready++;
2534                         spin_unlock(&delayed_refs->lock);
2535                         locked_ref = NULL;
2536                         cond_resched();
2537                         count++;
2538                         continue;
2539                 }
2540
2541                 /*
2542                  * record the must insert reserved flag before we
2543                  * drop the spin lock.
2544                  */
2545                 must_insert_reserved = locked_ref->must_insert_reserved;
2546                 locked_ref->must_insert_reserved = 0;
2547
2548                 extent_op = locked_ref->extent_op;
2549                 locked_ref->extent_op = NULL;
2550
2551                 if (!ref) {
2552
2553
2554                         /* All delayed refs have been processed, go ahead
2555                          * and send the head node to run_one_delayed_ref,
2556                          * so that any accounting fixes can happen.
2557                          */
2558                         ref = &locked_ref->node;
2559
2560                         if (extent_op && must_insert_reserved) {
2561                                 btrfs_free_delayed_extent_op(extent_op);
2562                                 extent_op = NULL;
2563                         }
2564
2565                         if (extent_op) {
2566                                 spin_unlock(&locked_ref->lock);
2567                                 ret = run_delayed_extent_op(trans, root,
2568                                                             ref, extent_op);
2569                                 btrfs_free_delayed_extent_op(extent_op);
2570
2571                                 if (ret) {
2572                                         /*
2573                                          * Need to reset must_insert_reserved if
2574                                          * there was an error so the abort stuff
2575                                          * can clean up the reserved space
2576                                          * properly.
2577                                          */
2578                                         if (must_insert_reserved)
2579                                                 locked_ref->must_insert_reserved = 1;
2580                                         locked_ref->processing = 0;
2581                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2582                                         btrfs_delayed_ref_unlock(locked_ref);
2583                                         return ret;
2584                                 }
2585                                 continue;
2586                         }
2587
2588                         /*
2589                          * Need to drop our head ref lock and re-acquire the
2590                          * delayed ref lock and then re-check to make sure
2591                          * nobody got added.
2592                          */
2593                         spin_unlock(&locked_ref->lock);
2594                         spin_lock(&delayed_refs->lock);
2595                         spin_lock(&locked_ref->lock);
2596                         if (!list_empty(&locked_ref->ref_list) ||
2597                             locked_ref->extent_op) {
2598                                 spin_unlock(&locked_ref->lock);
2599                                 spin_unlock(&delayed_refs->lock);
2600                                 continue;
2601                         }
2602                         ref->in_tree = 0;
2603                         delayed_refs->num_heads--;
2604                         rb_erase(&locked_ref->href_node,
2605                                  &delayed_refs->href_root);
2606                         spin_unlock(&delayed_refs->lock);
2607                 } else {
2608                         actual_count++;
2609                         ref->in_tree = 0;
2610                         list_del(&ref->list);
2611                 }
2612                 atomic_dec(&delayed_refs->num_entries);
2613
2614                 if (!btrfs_delayed_ref_is_head(ref)) {
2615                         /*
2616                          * when we run the delayed ref, also correct the
2617                          * ref_mod on the head
2618                          */
2619                         switch (ref->action) {
2620                         case BTRFS_ADD_DELAYED_REF:
2621                         case BTRFS_ADD_DELAYED_EXTENT:
2622                                 locked_ref->node.ref_mod -= ref->ref_mod;
2623                                 break;
2624                         case BTRFS_DROP_DELAYED_REF:
2625                                 locked_ref->node.ref_mod += ref->ref_mod;
2626                                 break;
2627                         default:
2628                                 WARN_ON(1);
2629                         }
2630                 }
2631                 spin_unlock(&locked_ref->lock);
2632
2633                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2634                                           must_insert_reserved);
2635
2636                 btrfs_free_delayed_extent_op(extent_op);
2637                 if (ret) {
2638                         locked_ref->processing = 0;
2639                         btrfs_delayed_ref_unlock(locked_ref);
2640                         btrfs_put_delayed_ref(ref);
2641                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2642                         return ret;
2643                 }
2644
2645                 /*
2646                  * If this node is a head, that means all the refs in this head
2647                  * have been dealt with, and we will pick the next head to deal
2648                  * with, so we must unlock the head and drop it from the cluster
2649                  * list before we release it.
2650                  */
2651                 if (btrfs_delayed_ref_is_head(ref)) {
2652                         if (locked_ref->is_data &&
2653                             locked_ref->total_ref_mod < 0) {
2654                                 spin_lock(&delayed_refs->lock);
2655                                 delayed_refs->pending_csums -= ref->num_bytes;
2656                                 spin_unlock(&delayed_refs->lock);
2657                         }
2658                         btrfs_delayed_ref_unlock(locked_ref);
2659                         locked_ref = NULL;
2660                 }
2661                 btrfs_put_delayed_ref(ref);
2662                 count++;
2663                 cond_resched();
2664         }
2665
2666         /*
2667          * We don't want to include ref heads since we can have empty ref heads
2668          * and those would drastically skew the runtime average downward, since
2669          * for them we only do accounting, not actual extent tree updates.
2670          */
2671         if (actual_count > 0) {
2672                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2673                 u64 avg;
2674
2675                 /*
2676                  * We weigh the current average higher than our current runtime
2677                  * to avoid large swings in the average.
2678                  */
2679                 spin_lock(&delayed_refs->lock);
2680                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2681                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2682                 spin_unlock(&delayed_refs->lock);
2683         }
2684         return 0;
2685 }
2686
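/*
 * Editor's note - a worked example of the weighted average computed in
 * __btrfs_run_delayed_refs() above, with illustrative numbers only:
 * given a stored avg_delayed_ref_runtime of 1,000,000ns (1ms) and a
 * measured runtime of 5,000,000ns (5ms) for this batch,
 *
 *	avg = 1000000 * 3 + 5000000 = 8000000
 *	avg >> 2                    = 2000000
 *
 * so one slow batch only moves the estimate a quarter of the way
 * toward the new sample.
 */
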
2687 #ifdef SCRAMBLE_DELAYED_REFS
2688 /*
2689  * Normally delayed refs get processed in ascending bytenr order. This
2690  * correlates in most cases to the order added. To expose dependencies on this
2691  * order, we start to process the tree in the middle instead of at the beginning.
2692  */
2693 static u64 find_middle(struct rb_root *root)
2694 {
2695         struct rb_node *n = root->rb_node;
2696         struct btrfs_delayed_ref_node *entry;
2697         int alt = 1;
2698         u64 middle = 0;
2699         u64 first = 0, last = 0;
2700
2701         n = rb_first(root);
2702         if (n) {
2703                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2704                 first = entry->bytenr;
2705         }
2706         n = rb_last(root);
2707         if (n) {
2708                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2709                 last = entry->bytenr;
2710         }
2711         n = root->rb_node;
2712
2713         while (n) {
2714                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2715                 WARN_ON(!entry->in_tree);
2716
2717                 middle = entry->bytenr;
2718
2719                 if (alt)
2720                         n = n->rb_left;
2721                 else
2722                         n = n->rb_right;
2723
2724                 alt = 1 - alt;
2725         }
2726         return middle;
2727 }
2728 #endif
2729
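/*
 * Editor's note - an illustration of find_middle()'s alternating
 * descent, assuming a balanced tree of bytenrs 10,20,30,40,50,60,70
 * rooted at 40: the walk visits 40 (go left), 20 (go right), then 30
 * (a leaf), and returns 30 - a node near the middle of the bytenr
 * range rather than the first one.
 */
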
2730 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2731 {
2732         u64 num_bytes;
2733
2734         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2735                              sizeof(struct btrfs_extent_inline_ref));
2736         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2737                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2738
2739         /*
2740          * We don't ever fill up leaves all the way, so the caller doubles this
2741          * result to get closer to what we're really going to use.
2742          */
2743         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2744 }
2745
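/*
 * Editor's sketch of the heads_to_leaves() math with assumed sizes
 * (16K nodes so BTRFS_LEAF_DATA_SIZE(root) is a bit under 16384, a
 * 24 byte extent item, a 9 byte inline ref, SKINNY_METADATA set):
 *
 *	1000 heads * (24 + 9) = 33000 bytes
 *	33000 / ~16300        = 2 leaves
 */
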
2746 /*
2747  * Takes the number of bytes to be checksummed and figures out how many leaves it
2748  * would require to store the csums for that many bytes.
2749  */
2750 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2751 {
2752         u64 csum_size;
2753         u64 num_csums_per_leaf;
2754         u64 num_csums;
2755
2756         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2757         num_csums_per_leaf = div64_u64(csum_size,
2758                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2759         num_csums = div64_u64(csum_bytes, root->sectorsize);
2760         num_csums += num_csums_per_leaf - 1;
2761         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2762         return num_csums;
2763 }
2764
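/*
 * Editor's sketch of btrfs_csum_bytes_to_leaves() with assumed
 * parameters (4K sectorsize, 4 byte crc32c csums, ~16K leaves so
 * csum_size comes out around 16258 bytes):
 *
 *	num_csums_per_leaf = 16258 / 4              = 4064
 *	1GiB of data: num_csums = 1073741824 / 4096 = 262144
 *	leaves = (262144 + 4063) / 4064             = 65
 *
 * so checksumming a gigabyte of data costs on the order of 65 csum
 * tree leaves.
 */
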
2765 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2766                                        struct btrfs_root *root)
2767 {
2768         struct btrfs_block_rsv *global_rsv;
2769         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2770         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2771         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2772         u64 num_bytes, num_dirty_bgs_bytes;
2773         int ret = 0;
2774
2775         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2776         num_heads = heads_to_leaves(root, num_heads);
2777         if (num_heads > 1)
2778                 num_bytes += (num_heads - 1) * root->nodesize;
2779         num_bytes <<= 1;
2780         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2781         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2782                                                              num_dirty_bgs);
2783         global_rsv = &root->fs_info->global_block_rsv;
2784
2785         /*
2786          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2787          * wiggle room since running delayed refs can create more delayed refs.
2788          */
2789         if (global_rsv->space_info->full) {
2790                 num_dirty_bgs_bytes <<= 1;
2791                 num_bytes <<= 1;
2792         }
2793
2794         spin_lock(&global_rsv->lock);
2795         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2796                 ret = 1;
2797         spin_unlock(&global_rsv->lock);
2798         return ret;
2799 }
2800
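/*
 * Editor's sketch of btrfs_check_space_for_delayed_refs() with
 * illustrative numbers (16K nodesize, SKINNY_METADATA, ignoring the
 * base btrfs_calc_trans_metadata_size() term): 1000 ready heads give
 * 2 leaves (see heads_to_leaves() above), so
 *
 *	num_bytes += (2 - 1) * 16384, then num_bytes <<= 1
 *
 * with the csum leaves and dirty block group costs added on top. If
 * the space_info is full, both terms are doubled once more, and we
 * return 1 whenever the global reserve can't cover the total.
 */
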
2801 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2802                                        struct btrfs_root *root)
2803 {
2804         struct btrfs_fs_info *fs_info = root->fs_info;
2805         u64 num_entries =
2806                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2807         u64 avg_runtime;
2808         u64 val;
2809
2810         smp_mb();
2811         avg_runtime = fs_info->avg_delayed_ref_runtime;
2812         val = num_entries * avg_runtime;
2813         if (val >= NSEC_PER_SEC)
2814                 return 1;
2815         if (val >= NSEC_PER_SEC / 2)
2816                 return 2;
2817
2818         return btrfs_check_space_for_delayed_refs(trans, root);
2819 }
2820
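/*
 * Editor's note - illustrative numbers for the thresholds above: with
 * avg_delayed_ref_runtime at 100,000ns (100us per ref),
 *
 *	10000 entries: val = 1,000,000,000ns >= NSEC_PER_SEC   -> 1
 *	 5000 entries: val =   500,000,000ns >= NSEC_PER_SEC/2 -> 2
 *
 * and anything cheaper falls through to the space check.
 */
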
2821 struct async_delayed_refs {
2822         struct btrfs_root *root;
2823         int count;
2824         int error;
2825         int sync;
2826         struct completion wait;
2827         struct btrfs_work work;
2828 };
2829
2830 static void delayed_ref_async_start(struct btrfs_work *work)
2831 {
2832         struct async_delayed_refs *async;
2833         struct btrfs_trans_handle *trans;
2834         int ret;
2835
2836         async = container_of(work, struct async_delayed_refs, work);
2837
2838         trans = btrfs_join_transaction(async->root);
2839         if (IS_ERR(trans)) {
2840                 async->error = PTR_ERR(trans);
2841                 goto done;
2842         }
2843
2844         /*
2845          * trans->sync means that when we call btrfs_end_transaction, we won't
2846          * wait on delayed refs.
2847          */
2848         trans->sync = true;
2849         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2850         if (ret)
2851                 async->error = ret;
2852
2853         ret = btrfs_end_transaction(trans, async->root);
2854         if (ret && !async->error)
2855                 async->error = ret;
2856 done:
2857         if (async->sync)
2858                 complete(&async->wait);
2859         else
2860                 kfree(async);
2861 }
2862
2863 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2864                                  unsigned long count, int wait)
2865 {
2866         struct async_delayed_refs *async;
2867         int ret;
2868
2869         async = kmalloc(sizeof(*async), GFP_NOFS);
2870         if (!async)
2871                 return -ENOMEM;
2872
2873         async->root = root->fs_info->tree_root;
2874         async->count = count;
2875         async->error = 0;
2876         if (wait)
2877                 async->sync = 1;
2878         else
2879                 async->sync = 0;
2880         init_completion(&async->wait);
2881
2882         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2883                         delayed_ref_async_start, NULL, NULL);
2884
2885         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2886
2887         if (wait) {
2888                 wait_for_completion(&async->wait);
2889                 ret = async->error;
2890                 kfree(async);
2891                 return ret;
2892         }
2893         return 0;
2894 }
2895
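/*
 * Editor's sketch of how a caller might use the async helper above.
 * This is a hypothetical example, not a call site from this file:
 * wait == 1 turns the call into a synchronous flush whose error is
 * propagated back; with wait == 0 the work item frees itself and any
 * error is dropped.
 */
static int __maybe_unused example_flush_delayed_refs(struct btrfs_root *root)
{
	/* run roughly 64 delayed refs and wait for the result */
	return btrfs_async_run_delayed_refs(root, 64, 1);
}
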
2896 /*
2897  * this starts processing the delayed reference count updates and
2898  * extent insertions we have queued up so far.  count can be
2899  * 0, which means to process everything in the tree at the start
2900  * of the run (but not newly added entries), or it can be some target
2901  * number you'd like to process.
2902  *
2903  * Returns 0 on success or if called with an aborted transaction
2904  * Returns <0 on error and aborts the transaction
2905  */
2906 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2907                            struct btrfs_root *root, unsigned long count)
2908 {
2909         struct rb_node *node;
2910         struct btrfs_delayed_ref_root *delayed_refs;
2911         struct btrfs_delayed_ref_head *head;
2912         int ret;
2913         int run_all = count == (unsigned long)-1;
2914         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2915
2916         /* We'll clean this up in btrfs_cleanup_transaction */
2917         if (trans->aborted)
2918                 return 0;
2919
2920         if (root == root->fs_info->extent_root)
2921                 root = root->fs_info->tree_root;
2922
2923         delayed_refs = &trans->transaction->delayed_refs;
2924         if (count == 0)
2925                 count = atomic_read(&delayed_refs->num_entries) * 2;
2926
2927 again:
2928 #ifdef SCRAMBLE_DELAYED_REFS
2929         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2930 #endif
2931         trans->can_flush_pending_bgs = false;
2932         ret = __btrfs_run_delayed_refs(trans, root, count);
2933         if (ret < 0) {
2934                 btrfs_abort_transaction(trans, root, ret);
2935                 return ret;
2936         }
2937
2938         if (run_all) {
2939                 if (!list_empty(&trans->new_bgs))
2940                         btrfs_create_pending_block_groups(trans, root);
2941
2942                 spin_lock(&delayed_refs->lock);
2943                 node = rb_first(&delayed_refs->href_root);
2944                 if (!node) {
2945                         spin_unlock(&delayed_refs->lock);
2946                         goto out;
2947                 }
2948                 count = (unsigned long)-1;
2949
2950                 while (node) {
2951                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2952                                         href_node);
2953                         if (btrfs_delayed_ref_is_head(&head->node)) {
2954                                 struct btrfs_delayed_ref_node *ref;
2955
2956                                 ref = &head->node;
2957                                 atomic_inc(&ref->refs);
2958
2959                                 spin_unlock(&delayed_refs->lock);
2960                                 /*
2961                                  * Mutex was contended; block until it's
2962                                  * released, then try again.
2963                                  */
2964                                 mutex_lock(&head->mutex);
2965                                 mutex_unlock(&head->mutex);
2966
2967                                 btrfs_put_delayed_ref(ref);
2968                                 cond_resched();
2969                                 goto again;
2970                         } else {
2971                                 WARN_ON(1);
2972                         }
2973                         node = rb_next(node);
2974                 }
2975                 spin_unlock(&delayed_refs->lock);
2976                 cond_resched();
2977                 goto again;
2978         }
2979 out:
2980         assert_qgroups_uptodate(trans);
2981         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2982         return 0;
2983 }
2984
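/*
 * Editor's sketch of the count semantics documented above; a
 * hypothetical caller, not a call site from this file:
 */
static int __maybe_unused example_run_refs(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root)
{
	int ret;

	/* process a bounded batch of about 32 refs */
	ret = btrfs_run_delayed_refs(trans, root, 32);
	if (ret)
		return ret;

	/* (unsigned long)-1 runs everything, including newly added refs */
	return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}
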
2985 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2986                                 struct btrfs_root *root,
2987                                 u64 bytenr, u64 num_bytes, u64 flags,
2988                                 int level, int is_data)
2989 {
2990         struct btrfs_delayed_extent_op *extent_op;
2991         int ret;
2992
2993         extent_op = btrfs_alloc_delayed_extent_op();
2994         if (!extent_op)
2995                 return -ENOMEM;
2996
2997         extent_op->flags_to_set = flags;
2998         extent_op->update_flags = 1;
2999         extent_op->update_key = 0;
3000         extent_op->is_data = is_data ? 1 : 0;
3001         extent_op->level = level;
3002
3003         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3004                                           num_bytes, extent_op);
3005         if (ret)
3006                 btrfs_free_delayed_extent_op(extent_op);
3007         return ret;
3008 }
3009
3010 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3011                                       struct btrfs_root *root,
3012                                       struct btrfs_path *path,
3013                                       u64 objectid, u64 offset, u64 bytenr)
3014 {
3015         struct btrfs_delayed_ref_head *head;
3016         struct btrfs_delayed_ref_node *ref;
3017         struct btrfs_delayed_data_ref *data_ref;
3018         struct btrfs_delayed_ref_root *delayed_refs;
3019         int ret = 0;
3020
3021         delayed_refs = &trans->transaction->delayed_refs;
3022         spin_lock(&delayed_refs->lock);
3023         head = btrfs_find_delayed_ref_head(trans, bytenr);
3024         if (!head) {
3025                 spin_unlock(&delayed_refs->lock);
3026                 return 0;
3027         }
3028
3029         if (!mutex_trylock(&head->mutex)) {
3030                 atomic_inc(&head->node.refs);
3031                 spin_unlock(&delayed_refs->lock);
3032
3033                 btrfs_release_path(path);
3034
3035                 /*
3036                  * Mutex was contended; block until it's released and let
3037                  * the caller try again.
3038                  */
3039                 mutex_lock(&head->mutex);
3040                 mutex_unlock(&head->mutex);
3041                 btrfs_put_delayed_ref(&head->node);
3042                 return -EAGAIN;
3043         }
3044         spin_unlock(&delayed_refs->lock);
3045
3046         spin_lock(&head->lock);
3047         list_for_each_entry(ref, &head->ref_list, list) {
3048                 /* If it's a shared ref we know a cross reference exists */
3049                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3050                         ret = 1;
3051                         break;
3052                 }
3053
3054                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3055
3056                 /*
3057                  * If our ref doesn't match the one we're currently looking at
3058                  * then we have a cross reference.
3059                  */
3060                 if (data_ref->root != root->root_key.objectid ||
3061                     data_ref->objectid != objectid ||
3062                     data_ref->offset != offset) {
3063                         ret = 1;
3064                         break;
3065                 }
3066         }
3067         spin_unlock(&head->lock);
3068         mutex_unlock(&head->mutex);
3069         return ret;
3070 }
3071
3072 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3073                                         struct btrfs_root *root,
3074                                         struct btrfs_path *path,
3075                                         u64 objectid, u64 offset, u64 bytenr)
3076 {
3077         struct btrfs_root *extent_root = root->fs_info->extent_root;
3078         struct extent_buffer *leaf;
3079         struct btrfs_extent_data_ref *ref;
3080         struct btrfs_extent_inline_ref *iref;
3081         struct btrfs_extent_item *ei;
3082         struct btrfs_key key;
3083         u32 item_size;
3084         int ret;
3085
3086         key.objectid = bytenr;
3087         key.offset = (u64)-1;
3088         key.type = BTRFS_EXTENT_ITEM_KEY;
3089
3090         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3091         if (ret < 0)
3092                 goto out;
3093         BUG_ON(ret == 0); /* Corruption */
3094
3095         ret = -ENOENT;
3096         if (path->slots[0] == 0)
3097                 goto out;
3098
3099         path->slots[0]--;
3100         leaf = path->nodes[0];
3101         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3102
3103         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3104                 goto out;
3105
3106         ret = 1;
3107         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3108 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3109         if (item_size < sizeof(*ei)) {
3110                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3111                 goto out;
3112         }
3113 #endif
3114         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3115
3116         if (item_size != sizeof(*ei) +
3117             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3118                 goto out;
3119
3120         if (btrfs_extent_generation(leaf, ei) <=
3121             btrfs_root_last_snapshot(&root->root_item))
3122                 goto out;
3123
3124         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3125         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3126             BTRFS_EXTENT_DATA_REF_KEY)
3127                 goto out;
3128
3129         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3130         if (btrfs_extent_refs(leaf, ei) !=
3131             btrfs_extent_data_ref_count(leaf, ref) ||
3132             btrfs_extent_data_ref_root(leaf, ref) !=
3133             root->root_key.objectid ||
3134             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3135             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3136                 goto out;
3137
3138         ret = 0;
3139 out:
3140         return ret;
3141 }
3142
3143 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3144                           struct btrfs_root *root,
3145                           u64 objectid, u64 offset, u64 bytenr)
3146 {
3147         struct btrfs_path *path;
3148         int ret;
3149         int ret2;
3150
3151         path = btrfs_alloc_path();
3152         if (!path)
3153                 return -ENOMEM;
3154
3155         do {
3156                 ret = check_committed_ref(trans, root, path, objectid,
3157                                           offset, bytenr);
3158                 if (ret && ret != -ENOENT)
3159                         goto out;
3160
3161                 ret2 = check_delayed_ref(trans, root, path, objectid,
3162                                          offset, bytenr);
3163         } while (ret2 == -EAGAIN);
3164
3165         if (ret2 && ret2 != -ENOENT) {
3166                 ret = ret2;
3167                 goto out;
3168         }
3169
3170         if (ret != -ENOENT || ret2 != -ENOENT)
3171                 ret = 0;
3172 out:
3173         btrfs_free_path(path);
3174         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3175                 WARN_ON(ret > 0);
3176         return ret;
3177 }
3178
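/*
 * Editor's note on btrfs_cross_ref_exist() above: 0 means no other
 * root/inode/offset references the extent, nonzero means a cross
 * reference (or an error) was found, so callers treat nonzero as
 * "don't overwrite in place". A hypothetical caller, not a call site
 * from this file:
 */
static bool __maybe_unused example_can_nocow(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root, u64 ino,
					     u64 offset, u64 bytenr)
{
	return btrfs_cross_ref_exist(trans, root, ino, offset, bytenr) == 0;
}
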
3179 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3180                            struct btrfs_root *root,
3181                            struct extent_buffer *buf,
3182                            int full_backref, int inc)
3183 {
3184         u64 bytenr;
3185         u64 num_bytes;
3186         u64 parent;
3187         u64 ref_root;
3188         u32 nritems;
3189         struct btrfs_key key;
3190         struct btrfs_file_extent_item *fi;
3191         int i;
3192         int level;
3193         int ret = 0;
3194         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3195                             u64, u64, u64, u64, u64, u64, int);
3196
3197
3199                 return 0;
3200
3201         ref_root = btrfs_header_owner(buf);
3202         nritems = btrfs_header_nritems(buf);
3203         level = btrfs_header_level(buf);
3204
3205         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3206                 return 0;
3207
3208         if (inc)
3209                 process_func = btrfs_inc_extent_ref;
3210         else
3211                 process_func = btrfs_free_extent;
3212
3213         if (full_backref)
3214                 parent = buf->start;
3215         else
3216                 parent = 0;
3217
3218         for (i = 0; i < nritems; i++) {
3219                 if (level == 0) {
3220                         btrfs_item_key_to_cpu(buf, &key, i);
3221                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3222                                 continue;
3223                         fi = btrfs_item_ptr(buf, i,
3224                                             struct btrfs_file_extent_item);
3225                         if (btrfs_file_extent_type(buf, fi) ==
3226                             BTRFS_FILE_EXTENT_INLINE)
3227                                 continue;
3228                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3229                         if (bytenr == 0)
3230                                 continue;
3231
3232                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3233                         key.offset -= btrfs_file_extent_offset(buf, fi);
3234                         ret = process_func(trans, root, bytenr, num_bytes,
3235                                            parent, ref_root, key.objectid,
3236                                            key.offset, 1);
3237                         if (ret)
3238                                 goto fail;
3239                 } else {
3240                         bytenr = btrfs_node_blockptr(buf, i);
3241                         num_bytes = root->nodesize;
3242                         ret = process_func(trans, root, bytenr, num_bytes,
3243                                            parent, ref_root, level - 1, 0,
3244                                            1);
3245                         if (ret)
3246                                 goto fail;
3247                 }
3248         }
3249         return 0;
3250 fail:
3251         return ret;
3252 }
3253
3254 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3255                   struct extent_buffer *buf, int full_backref)
3256 {
3257         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3258 }
3259
3260 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3261                   struct extent_buffer *buf, int full_backref)
3262 {
3263         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3264 }
3265
3266 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3267                                  struct btrfs_root *root,
3268                                  struct btrfs_path *path,
3269                                  struct btrfs_block_group_cache *cache)
3270 {
3271         int ret;
3272         struct btrfs_root *extent_root = root->fs_info->extent_root;
3273         unsigned long bi;
3274         struct extent_buffer *leaf;
3275
3276         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3277         if (ret) {
3278                 if (ret > 0)
3279                         ret = -ENOENT;
3280                 goto fail;
3281         }
3282
3283         leaf = path->nodes[0];
3284         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3285         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3286         btrfs_mark_buffer_dirty(leaf);
3287 fail:
3288         btrfs_release_path(path);
3289         return ret;
3291 }
3292
3293 static struct btrfs_block_group_cache *
3294 next_block_group(struct btrfs_root *root,
3295                  struct btrfs_block_group_cache *cache)
3296 {
3297         struct rb_node *node;
3298
3299         spin_lock(&root->fs_info->block_group_cache_lock);
3300
3301         /* If our block group was removed, we need a full search. */
3302         if (RB_EMPTY_NODE(&cache->cache_node)) {
3303                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3304
3305                 spin_unlock(&root->fs_info->block_group_cache_lock);
3306                 btrfs_put_block_group(cache);
3307                 cache = btrfs_lookup_first_block_group(root->fs_info,
3308                                                        next_bytenr);
3309                 return cache;
3310         }
3311         node = rb_next(&cache->cache_node);
3312         btrfs_put_block_group(cache);
3313         if (node) {
3314                 cache = rb_entry(node, struct btrfs_block_group_cache,
3315                                  cache_node);
3316                 btrfs_get_block_group(cache);
3317         } else
3318                 cache = NULL;
3319         spin_unlock(&root->fs_info->block_group_cache_lock);
3320         return cache;
3321 }
3322
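/*
 * Editor's sketch of the iteration pattern next_block_group()
 * supports; a hypothetical loop, not a call site from this file.
 * Each step drops the caller's reference on the old group and
 * returns the next one with a reference already held, so the loop
 * never leaks a block group reference:
 */
static void __maybe_unused example_walk_block_groups(struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_first_block_group(root->fs_info, 0);
	while (cache) {
		/* inspect cache->key.objectid / cache->key.offset here */
		cache = next_block_group(root, cache);
	}
}
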
3323 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3324                             struct btrfs_trans_handle *trans,
3325                             struct btrfs_path *path)
3326 {
3327         struct btrfs_root *root = block_group->fs_info->tree_root;
3328         struct inode *inode = NULL;
3329         u64 alloc_hint = 0;
3330         int dcs = BTRFS_DC_ERROR;
3331         u64 num_pages = 0;
3332         int retries = 0;
3333         int ret = 0;
3334
3335         /*
3336          * If this block group is smaller than 100 megs, don't bother caching the
3337          * block group.
3338          */
3339         if (block_group->key.offset < (100 * 1024 * 1024)) {
3340                 spin_lock(&block_group->lock);
3341                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3342                 spin_unlock(&block_group->lock);
3343                 return 0;
3344         }
3345
3346         if (trans->aborted)
3347                 return 0;
3348 again:
3349         inode = lookup_free_space_inode(root, block_group, path);
3350         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3351                 ret = PTR_ERR(inode);
3352                 btrfs_release_path(path);
3353                 goto out;
3354         }
3355
3356         if (IS_ERR(inode)) {
3357                 BUG_ON(retries);
3358                 retries++;
3359
3360                 if (block_group->ro)
3361                         goto out_free;
3362
3363                 ret = create_free_space_inode(root, trans, block_group, path);
3364                 if (ret)
3365                         goto out_free;
3366                 goto again;
3367         }
3368
3369         /* We've already set up this transaction, go ahead and exit */
3370         if (block_group->cache_generation == trans->transid &&
3371             i_size_read(inode)) {
3372                 dcs = BTRFS_DC_SETUP;
3373                 goto out_put;
3374         }
3375
3376         /*
3377          * We want to set the generation to 0 so that if anything goes wrong
3378          * from here on out we know not to trust this cache the next time we
3379          * load it.
3380          */
3381         BTRFS_I(inode)->generation = 0;
3382         ret = btrfs_update_inode(trans, root, inode);
3383         if (ret) {
3384                 /*
3385                  * Theoretically we could recover from this by simply setting the
3386                  * super cache generation to 0 so we know to invalidate the
3387                  * cache, but then we'd have to keep track of the block groups
3388                  * that fail this way so we know we _have_ to reset this cache
3389                  * before the next commit or risk reading a stale cache.  So to
3390                  * limit our exposure to horrible edge cases, let's just abort the
3391                  * transaction; this only happens in really bad situations
3392                  * anyway.
3393                  */
3394                 btrfs_abort_transaction(trans, root, ret);
3395                 goto out_put;
3396         }
3397         WARN_ON(ret);
3398
3399         if (i_size_read(inode) > 0) {
3400                 ret = btrfs_check_trunc_cache_free_space(root,
3401                                         &root->fs_info->global_block_rsv);
3402                 if (ret)
3403                         goto out_put;
3404
3405                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3406                 if (ret)
3407                         goto out_put;
3408         }
3409
3410         spin_lock(&block_group->lock);
3411         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3412             !btrfs_test_opt(root, SPACE_CACHE)) {
3413                 /*
3414                  * don't bother trying to write stuff out _if_
3415                  * a) we're not cached, or
3416                  * b) we're mounted with the nospace_cache option.
3417                  */
3418                 dcs = BTRFS_DC_WRITTEN;
3419                 spin_unlock(&block_group->lock);
3420                 goto out_put;
3421         }
3422         spin_unlock(&block_group->lock);
3423
3424         /*
3425          * If we hit an ENOSPC when setting up the cache in this transaction, just
3426          * skip doing the setup; we've already cleared the cache, so we're safe.
3427          */
3428         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3429                 ret = -ENOSPC;
3430                 goto out_put;
3431         }
3432
3433         /*
3434          * Try to preallocate enough space based on how big the block group is.
3435          * Keep in mind this has to include any pinned space which could end up
3436          * taking up quite a bit since it's not folded into the other space
3437          * cache.
3438          */
3439         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3440         if (!num_pages)
3441                 num_pages = 1;
3442
3443         num_pages *= 16;
3444         num_pages *= PAGE_CACHE_SIZE;
3445
3446         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3447         if (ret)
3448                 goto out_put;
3449
3450         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3451                                               num_pages, num_pages,
3452                                               &alloc_hint);
3453         /*
3454          * Our cache requires contiguous chunks so that we don't modify a bunch
3455          * of metadata or split extents when writing the cache out, which means
3456          * we can hit ENOSPC if we are heavily fragmented in addition to just
3457          * normal out of space conditions.  So if we hit this, just skip setting
3458          * up any other block groups for this transaction; maybe we'll unpin
3459          * enough space the next time around.
3460          */
3461         if (!ret)
3462                 dcs = BTRFS_DC_SETUP;
3463         else if (ret == -ENOSPC)
3464                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3465         btrfs_free_reserved_data_space(inode, 0, num_pages);
3466
3467 out_put:
3468         iput(inode);
3469 out_free:
3470         btrfs_release_path(path);
3471 out:
3472         spin_lock(&block_group->lock);
3473         if (!ret && dcs == BTRFS_DC_SETUP)
3474                 block_group->cache_generation = trans->transid;
3475         block_group->disk_cache_state = dcs;
3476         spin_unlock(&block_group->lock);
3477
3478         return ret;
3479 }
3480
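/*
 * Editor's note - a worked example of the cache preallocation sizing
 * in cache_save_setup() above, assuming 4K pages: for a 1GiB block
 * group,
 *
 *	num_pages = 1GiB / 256MiB = 4
 *	num_pages *= 16           = 64 pages
 *	64 * 4096                 = 262144 bytes
 *
 * so the free space cache file is preallocated at roughly 256KiB per
 * gigabyte of block group.
 */
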
3481 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3482                             struct btrfs_root *root)
3483 {
3484         struct btrfs_block_group_cache *cache, *tmp;
3485         struct btrfs_transaction *cur_trans = trans->transaction;
3486         struct btrfs_path *path;
3487
3488         if (list_empty(&cur_trans->dirty_bgs) ||
3489             !btrfs_test_opt(root, SPACE_CACHE))
3490                 return 0;
3491
3492         path = btrfs_alloc_path();
3493         if (!path)
3494                 return -ENOMEM;
3495
3496         /* Could add new block groups, use _safe just in case */
3497         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3498                                  dirty_list) {
3499                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3500                         cache_save_setup(cache, trans, path);
3501         }
3502
3503         btrfs_free_path(path);
3504         return 0;
3505 }
3506
3507 /*
3508  * transaction commit does final block group cache writeback during a
3509  * critical section where nothing is allowed to change the FS.  This is
3510  * required in order for the cache to actually match the block group,
3511  * but can introduce a lot of latency into the commit.
3512  *
3513  * So, btrfs_start_dirty_block_groups is here to kick off block group
3514  * cache IO.  There's a chance we'll have to redo some of it if the
3515  * block group changes again during the commit, but it greatly reduces
3516  * the commit latency by getting rid of the easy block groups while
3517  * we're still allowing others to join the commit.
3518  */
3519 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3520                                    struct btrfs_root *root)
3521 {
3522         struct btrfs_block_group_cache *cache;
3523         struct btrfs_transaction *cur_trans = trans->transaction;
3524         int ret = 0;
3525         int should_put;
3526         struct btrfs_path *path = NULL;
3527         LIST_HEAD(dirty);
3528         struct list_head *io = &cur_trans->io_bgs;
3529         int num_started = 0;
3530         int loops = 0;
3531
3532         spin_lock(&cur_trans->dirty_bgs_lock);
3533         if (list_empty(&cur_trans->dirty_bgs)) {
3534                 spin_unlock(&cur_trans->dirty_bgs_lock);
3535                 return 0;
3536         }
3537         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3538         spin_unlock(&cur_trans->dirty_bgs_lock);
3539
3540 again:
3541         /*
3542          * make sure all the block groups on our dirty list actually
3543          * exist
3544          */
3545         btrfs_create_pending_block_groups(trans, root);
3546
3547         if (!path) {
3548                 path = btrfs_alloc_path();
3549                 if (!path)
3550                         return -ENOMEM;
3551         }
3552
3553         /*
3554                  * cache_write_mutex is here only to protect us from balance, or from
3555                  * automatic removal of empty block groups, deleting this block group
3556                  * while we are writing out the cache
3557          */
3558         mutex_lock(&trans->transaction->cache_write_mutex);
3559         while (!list_empty(&dirty)) {
3560                 cache = list_first_entry(&dirty,
3561                                          struct btrfs_block_group_cache,
3562                                          dirty_list);
3563                 /*
3564                  * this can happen if something re-dirties a block
3565                  * group that is already under IO.  Just wait for it to
3566                  * finish and then do it all again
3567                  */
3568                 if (!list_empty(&cache->io_list)) {
3569                         list_del_init(&cache->io_list);
3570                         btrfs_wait_cache_io(root, trans, cache,
3571                                             &cache->io_ctl, path,
3572                                             cache->key.objectid);
3573                         btrfs_put_block_group(cache);
3574                 }
3575
3577                 /*
3578                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3579                  * if it should update the cache_state.  Don't delete
3580                  * until after we wait.
3581                  *
3582                  * Since we're not running in the commit critical section
3583                  * we need the dirty_bgs_lock to protect us from update_block_group.
3584                  */
3585                 spin_lock(&cur_trans->dirty_bgs_lock);
3586                 list_del_init(&cache->dirty_list);
3587                 spin_unlock(&cur_trans->dirty_bgs_lock);
3588
3589                 should_put = 1;
3590
3591                 cache_save_setup(cache, trans, path);
3592
3593                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3594                         cache->io_ctl.inode = NULL;
3595                         ret = btrfs_write_out_cache(root, trans, cache, path);
3596                         if (ret == 0 && cache->io_ctl.inode) {
3597                                 num_started++;
3598                                 should_put = 0;
3599
3600                                 /*
3601                                  * the cache_write_mutex is protecting
3602                                  * the io_list
3603                                  */
3604                                 list_add_tail(&cache->io_list, io);
3605                         } else {
3606                                 /*
3607                                  * if we failed to write the cache, the
3608                                  * generation will be bad and life goes on
3609                                  */
3610                                 ret = 0;
3611                         }
3612                 }
3613                 if (!ret) {
3614                         ret = write_one_cache_group(trans, root, path, cache);
3615                         /*
3616                          * Our block group might still be attached to the list
3617                          * of new block groups in the transaction handle of some
3618                          * other task (struct btrfs_trans_handle->new_bgs). This
3619                          * means its block group item isn't yet in the extent
3620                          * tree. If this happens ignore the error, as we will
3621                          * try again later in the critical section of the
3622                          * transaction commit.
3623                          */
3624                         if (ret == -ENOENT) {
3625                                 ret = 0;
3626                                 spin_lock(&cur_trans->dirty_bgs_lock);
3627                                 if (list_empty(&cache->dirty_list)) {
3628                                         list_add_tail(&cache->dirty_list,
3629                                                       &cur_trans->dirty_bgs);
3630                                         btrfs_get_block_group(cache);
3631                                 }
3632                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3633                         } else if (ret) {
3634                                 btrfs_abort_transaction(trans, root, ret);
3635                         }
3636                 }
3637
3638                 /* if it's not on the io list, we need to put the block group */
3639                 if (should_put)
3640                         btrfs_put_block_group(cache);
3641
3642                 if (ret)
3643                         break;
3644
3645                 /*
3646                  * Avoid blocking other tasks for too long. It might even save
3647                  * us from writing caches for block groups that are going to be
3648                  * removed.
3649                  */
3650                 mutex_unlock(&trans->transaction->cache_write_mutex);
3651                 mutex_lock(&trans->transaction->cache_write_mutex);
3652         }
3653         mutex_unlock(&trans->transaction->cache_write_mutex);
3654
3655         /*
3656          * go through delayed refs for all the stuff we've just kicked off
3657          * and then loop back (just once)
3658          */
3659         ret = btrfs_run_delayed_refs(trans, root, 0);
3660         if (!ret && loops == 0) {
3661                 loops++;
3662                 spin_lock(&cur_trans->dirty_bgs_lock);
3663                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3664                 /*
3665                  * dirty_bgs_lock protects us from concurrent block group
3666                  * deletes too (not just cache_write_mutex).
3667                  */
3668                 if (!list_empty(&dirty)) {
3669                         spin_unlock(&cur_trans->dirty_bgs_lock);
3670                         goto again;
3671                 }
3672                 spin_unlock(&cur_trans->dirty_bgs_lock);
3673         }
3674
3675         btrfs_free_path(path);
3676         return ret;
3677 }
3678
3679 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3680                                    struct btrfs_root *root)
3681 {
3682         struct btrfs_block_group_cache *cache;
3683         struct btrfs_transaction *cur_trans = trans->transaction;
3684         int ret = 0;
3685         int should_put;
3686         struct btrfs_path *path;
3687         struct list_head *io = &cur_trans->io_bgs;
3688         int num_started = 0;
3689
3690         path = btrfs_alloc_path();
3691         if (!path)
3692                 return -ENOMEM;
3693
3694         /*
3695          * We don't need the lock here since we are protected by the transaction
3696          * commit.  We want to do the cache_save_setup first and then run the
3697          * delayed refs to make sure we have the best chance at doing this all
3698          * in one shot.
3699          */
3700         while (!list_empty(&cur_trans->dirty_bgs)) {
3701                 cache = list_first_entry(&cur_trans->dirty_bgs,
3702                                          struct btrfs_block_group_cache,
3703                                          dirty_list);
3704
3705                 /*
3706                  * this can happen if cache_save_setup re-dirties a block
3707                  * group that is already under IO.  Just wait for it to
3708                  * finish and then do it all again
3709                  */
3710                 if (!list_empty(&cache->io_list)) {
3711                         list_del_init(&cache->io_list);
3712                         btrfs_wait_cache_io(root, trans, cache,
3713                                             &cache->io_ctl, path,
3714                                             cache->key.objectid);
3715                         btrfs_put_block_group(cache);
3716                 }
3717
3718                 /*
3719                  * don't remove from the dirty list until after we've waited
3720                  * on any pending IO
3721                  */
3722                 list_del_init(&cache->dirty_list);
3723                 should_put = 1;
3724
3725                 cache_save_setup(cache, trans, path);
3726
3727                 if (!ret)
3728                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3729
3730                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3731                         cache->io_ctl.inode = NULL;
3732                         ret = btrfs_write_out_cache(root, trans, cache, path);
3733                         if (ret == 0 && cache->io_ctl.inode) {
3734                                 num_started++;
3735                                 should_put = 0;
3736                                 list_add_tail(&cache->io_list, io);
3737                         } else {
3738                                 /*
3739                                  * if we failed to write the cache, the
3740                                  * generation will be bad and life goes on
3741                                  */
3742                                 ret = 0;
3743                         }
3744                 }
3745                 if (!ret) {
3746                         ret = write_one_cache_group(trans, root, path, cache);
3747                         if (ret)
3748                                 btrfs_abort_transaction(trans, root, ret);
3749                 }
3750
3751                 /* if it's not on the io list, we need to put the block group */
3752                 if (should_put)
3753                         btrfs_put_block_group(cache);
3754         }
3755
3756         while (!list_empty(io)) {
3757                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3758                                          io_list);
3759                 list_del_init(&cache->io_list);
3760                 btrfs_wait_cache_io(root, trans, cache,
3761                                     &cache->io_ctl, path, cache->key.objectid);
3762                 btrfs_put_block_group(cache);
3763         }
3764
3765         btrfs_free_path(path);
3766         return ret;
3767 }
3768
3769 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3770 {
3771         struct btrfs_block_group_cache *block_group;
3772         int readonly = 0;
3773
3774         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3775         if (!block_group || block_group->ro)
3776                 readonly = 1;
3777         if (block_group)
3778                 btrfs_put_block_group(block_group);
3779         return readonly;
3780 }
3781
3782 static const char *alloc_name(u64 flags)
3783 {
3784         switch (flags) {
3785         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3786                 return "mixed";
3787         case BTRFS_BLOCK_GROUP_METADATA:
3788                 return "metadata";
3789         case BTRFS_BLOCK_GROUP_DATA:
3790                 return "data";
3791         case BTRFS_BLOCK_GROUP_SYSTEM:
3792                 return "system";
3793         default:
3794                 WARN_ON(1);
3795                 return "invalid-combination";
3796         }
3797 }
3798
3799 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3800                              u64 total_bytes, u64 bytes_used,
3801                              struct btrfs_space_info **space_info)
3802 {
3803         struct btrfs_space_info *found;
3804         int i;
3805         int factor;
3806         int ret;
3807
3808         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3809                      BTRFS_BLOCK_GROUP_RAID10))
3810                 factor = 2;
3811         else
3812                 factor = 1;
3813
3814         found = __find_space_info(info, flags);
3815         if (found) {
3816                 spin_lock(&found->lock);
3817                 found->total_bytes += total_bytes;
3818                 found->disk_total += total_bytes * factor;
3819                 found->bytes_used += bytes_used;
3820                 found->disk_used += bytes_used * factor;
3821                 if (total_bytes > 0)
3822                         found->full = 0;
3823                 spin_unlock(&found->lock);
3824                 *space_info = found;
3825                 return 0;
3826         }
3827         found = kzalloc(sizeof(*found), GFP_NOFS);
3828         if (!found)
3829                 return -ENOMEM;
3830
3831         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3832         if (ret) {
3833                 kfree(found);
3834                 return ret;
3835         }
3836
3837         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3838                 INIT_LIST_HEAD(&found->block_groups[i]);
3839         init_rwsem(&found->groups_sem);
3840         spin_lock_init(&found->lock);
3841         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3842         found->total_bytes = total_bytes;
3843         found->disk_total = total_bytes * factor;
3844         found->bytes_used = bytes_used;
3845         found->disk_used = bytes_used * factor;
3846         found->bytes_pinned = 0;
3847         found->bytes_reserved = 0;
3848         found->bytes_readonly = 0;
3849         found->bytes_may_use = 0;
3850         found->full = 0;
3851         found->max_extent_size = 0;
3852         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3853         found->chunk_alloc = 0;
3854         found->flush = 0;
3855         init_waitqueue_head(&found->wait);
3856         INIT_LIST_HEAD(&found->ro_bgs);
3857
3858         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3859                                     info->space_info_kobj, "%s",
3860                                     alloc_name(found->flags));
3861         if (ret) {
3862                 kfree(found);
3863                 return ret;
3864         }
3865
3866         *space_info = found;
3867         list_add_rcu(&found->list, &info->space_info);
3868         if (flags & BTRFS_BLOCK_GROUP_DATA)
3869                 info->data_sinfo = found;
3870
3871         return ret;
3872 }
3873
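/*
 * Editor's note - illustrating the factor logic in update_space_info()
 * above: adding a 10GiB RAID1 block group bumps total_bytes by 10GiB
 * but disk_total by 20GiB, since RAID1 (like DUP and RAID10) stores
 * two copies of every byte; the other profiles use factor 1 here.
 */
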
3874 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3875 {
3876         u64 extra_flags = chunk_to_extended(flags) &
3877                                 BTRFS_EXTENDED_PROFILE_MASK;
3878
3879         write_seqlock(&fs_info->profiles_lock);
3880         if (flags & BTRFS_BLOCK_GROUP_DATA)
3881                 fs_info->avail_data_alloc_bits |= extra_flags;
3882         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3883                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3884         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3885                 fs_info->avail_system_alloc_bits |= extra_flags;
3886         write_sequnlock(&fs_info->profiles_lock);
3887 }
3888
3889 /*
3890  * returns target flags in extended format or 0 if restripe for this
3891  * chunk_type is not in progress
3892  *
3893  * should be called with either volume_mutex or balance_lock held
3894  */
3895 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3896 {
3897         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3898         u64 target = 0;
3899
3900         if (!bctl)
3901                 return 0;
3902
3903         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3904             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3905                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3906         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3907                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3908                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3909         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3910                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3911                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3912         }
3913
3914         return target;
3915 }
3916
3917 /*
3918  * @flags: available profiles in extended format (see ctree.h)
3919  *
3920  * Returns reduced profile in chunk format.  If profile changing is in
3921  * progress (either running or paused) picks the target profile (if it's
3922  * already available), otherwise falls back to plain reducing.
3923  */
3924 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3925 {
3926         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3927         u64 target;
3928         u64 raid_type;
3929         u64 allowed = 0;
3930
3931         /*
3932          * see if restripe for this chunk_type is in progress; if so,
3933          * try to reduce to the target profile
3934          */
3935         spin_lock(&root->fs_info->balance_lock);
3936         target = get_restripe_target(root->fs_info, flags);
3937         if (target) {
3938                 /* pick target profile only if it's already available */
3939                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3940                         spin_unlock(&root->fs_info->balance_lock);
3941                         return extended_to_chunk(target);
3942                 }
3943         }
3944         spin_unlock(&root->fs_info->balance_lock);
3945
3946         /* First, mask out the RAID levels which aren't possible */
3947         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3948                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3949                         allowed |= btrfs_raid_group[raid_type];
3950         }
3951         allowed &= flags;
3952
3953         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3954                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3955         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3956                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3957         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3958                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3959         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3960                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3961         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3962                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3963
3964         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3965
3966         return extended_to_chunk(flags | allowed);
3967 }
3968
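/*
 * Editor's note - an example of the reduction above: with 4 rw
 * devices and no restripe target, flags carrying both the RAID10 and
 * RAID1 bits keep both after the availability mask, and the if/else
 * ladder picks RAID10, the highest-priority profile present. The
 * surviving bit is converted back to chunk format on return.
 */
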
3969 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3970 {
3971         unsigned seq;
3972         u64 flags;
3973
3974         do {
3975                 flags = orig_flags;
3976                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3977
3978                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3979                         flags |= root->fs_info->avail_data_alloc_bits;
3980                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3981                         flags |= root->fs_info->avail_system_alloc_bits;
3982                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3983                         flags |= root->fs_info->avail_metadata_alloc_bits;
3984         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3985
3986         return btrfs_reduce_alloc_profile(root, flags);
3987 }
3988
3989 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3990 {
3991         u64 flags;
3992         u64 ret;
3993
3994         if (data)
3995                 flags = BTRFS_BLOCK_GROUP_DATA;
3996         else if (root == root->fs_info->chunk_root)
3997                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3998         else
3999                 flags = BTRFS_BLOCK_GROUP_METADATA;
4000
4001         ret = get_alloc_profile(root, flags);
4002         return ret;
4003 }
4004
4005 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4006 {
4007         struct btrfs_space_info *data_sinfo;
4008         struct btrfs_root *root = BTRFS_I(inode)->root;
4009         struct btrfs_fs_info *fs_info = root->fs_info;
4010         u64 used;
4011         int ret = 0;
4012         int need_commit = 2;
4013         int have_pinned_space;
4014
4015         /* make sure bytes are sectorsize aligned */
4016         bytes = ALIGN(bytes, root->sectorsize);
4017
4018         if (btrfs_is_free_space_inode(inode)) {
4019                 need_commit = 0;
4020                 ASSERT(current->journal_info);
4021         }
4022
4023         data_sinfo = fs_info->data_sinfo;
4024         if (!data_sinfo)
4025                 goto alloc;
4026
4027 again:
4028         /* make sure we have enough space to handle the data first */
4029         spin_lock(&data_sinfo->lock);
4030         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4031                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4032                 data_sinfo->bytes_may_use;
4033
4034         if (used + bytes > data_sinfo->total_bytes) {
4035                 struct btrfs_trans_handle *trans;
4036
4037                 /*
4038                  * if we don't have enough free bytes in this space then we need
4039                  * to alloc a new chunk.
4040                  */
4041                 if (!data_sinfo->full) {
4042                         u64 alloc_target;
4043
4044                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4045                         spin_unlock(&data_sinfo->lock);
4046 alloc:
4047                         alloc_target = btrfs_get_alloc_profile(root, 1);
4048                         /*
4049                          * It is ugly that we don't call a nolock join
4050                          * transaction for the free space inode case here,
4051                          * but it is safe: we only reserve data space for
4052                          * the free space cache from within a transaction
4053                          * context, and a plain join transaction merely
4054                          * increments the use count of the current
4055                          * transaction handle without taking the fs
4056                          * trans_lock.
4057                          */
4058                         trans = btrfs_join_transaction(root);
4059                         if (IS_ERR(trans))
4060                                 return PTR_ERR(trans);
4061
4062                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4063                                              alloc_target,
4064                                              CHUNK_ALLOC_NO_FORCE);
4065                         btrfs_end_transaction(trans, root);
4066                         if (ret < 0) {
4067                                 if (ret != -ENOSPC)
4068                                         return ret;
4069                                 else {
4070                                         have_pinned_space = 1;
4071                                         goto commit_trans;
4072                                 }
4073                         }
4074
4075                         if (!data_sinfo)
4076                                 data_sinfo = fs_info->data_sinfo;
4077
4078                         goto again;
4079                 }
4080
4081                 /*
4082                  * If we don't have enough pinned space to deal with this
4083                  * allocation, and no chunk was removed in the current
4084                  * transaction, don't bother committing the transaction.
4085                  */
4086                 have_pinned_space = percpu_counter_compare(
4087                         &data_sinfo->total_bytes_pinned,
4088                         used + bytes - data_sinfo->total_bytes);
4089                 spin_unlock(&data_sinfo->lock);
4090
4091                 /* commit the current transaction and try again */
4092 commit_trans:
4093                 if (need_commit &&
4094                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4095                         need_commit--;
4096
4097                         if (need_commit > 0)
4098                                 btrfs_wait_ordered_roots(fs_info, -1);
4099
4100                         trans = btrfs_join_transaction(root);
4101                         if (IS_ERR(trans))
4102                                 return PTR_ERR(trans);
4103                         if (have_pinned_space >= 0 ||
4104                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4105                                      &trans->transaction->flags) ||
4106                             need_commit > 0) {
4107                                 ret = btrfs_commit_transaction(trans, root);
4108                                 if (ret)
4109                                         return ret;
4110                                 /*
4111                                  * Make sure that all running delayed iputs
4112                                  * are done.
4113                                  */
4114                                 down_write(&root->fs_info->delayed_iput_sem);
4115                                 up_write(&root->fs_info->delayed_iput_sem);
4116                                 goto again;
4117                         } else {
4118                                 btrfs_end_transaction(trans, root);
4119                         }
4120                 }
4121
4122                 trace_btrfs_space_reservation(root->fs_info,
4123                                               "space_info:enospc",
4124                                               data_sinfo->flags, bytes, 1);
4125                 return -ENOSPC;
4126         }
4127         data_sinfo->bytes_may_use += bytes;
4128         trace_btrfs_space_reservation(root->fs_info, "space_info",
4129                                       data_sinfo->flags, bytes, 1);
4130         spin_unlock(&data_sinfo->lock);
4131
4132         return ret;
4133 }
4134
4135 /*
4136  * New check_data_free_space() with the ability to do precise data reservation
4137  * Will replace the old btrfs_check_data_free_space(), but to keep the patch
4138  * split manageable, add the new function first and then replace the old one.
4139  */
4140 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4141 {
4142         struct btrfs_root *root = BTRFS_I(inode)->root;
4143         int ret;
4144
4145         /* align the range */
4146         len = round_up(start + len, root->sectorsize) -
4147               round_down(start, root->sectorsize);
4148         start = round_down(start, root->sectorsize);
4149
4150         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4151         if (ret < 0)
4152                 return ret;
4153
4154         /*
4155          * Use the new btrfs_qgroup_reserve_data to reserve precise data space
4156          *
4157          * TODO: Find a good method to avoid reserving data space for a NOCOW
4158          * range, without hurting performance when quotas are disabled.
4159          */
4160         ret = btrfs_qgroup_reserve_data(inode, start, len);
4161         return ret;
4162 }
4163
4164 /*
4165  * Called if we need to clear a data reservation for this inode,
4166  * normally in an error case.
4167  *
4168  * This one will *NOT* use the accurate qgroup reserved space API; it is only
4169  * for callers that cannot sleep and are sure the free won't affect the
4170  * qgroup reserved space, like clear_bit_hook().
4171  */
4172 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4173                                             u64 len)
4174 {
4175         struct btrfs_root *root = BTRFS_I(inode)->root;
4176         struct btrfs_space_info *data_sinfo;
4177
4178         /* Make sure the range is aligned to sectorsize */
4179         len = round_up(start + len, root->sectorsize) -
4180               round_down(start, root->sectorsize);
4181         start = round_down(start, root->sectorsize);
4182
4183         data_sinfo = root->fs_info->data_sinfo;
4184         spin_lock(&data_sinfo->lock);
4185         if (WARN_ON(data_sinfo->bytes_may_use < len))
4186                 data_sinfo->bytes_may_use = 0;
4187         else
4188                 data_sinfo->bytes_may_use -= len;
4189         trace_btrfs_space_reservation(root->fs_info, "space_info",
4190                                       data_sinfo->flags, len, 0);
4191         spin_unlock(&data_sinfo->lock);
4192 }
4193
4194 /*
4195  * Called if we need to clear a data reservation for this inode,
4196  * normally in an error case.
4197  *
4198  * This one will handle the per-inode data rsv map for the accurate
4199  * reserved space framework.
4200  */
4201 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4202 {
4203         btrfs_free_reserved_data_space_noquota(inode, start, len);
4204         btrfs_qgroup_free_data(inode, start, len);
4205 }
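
/*
 * Editor's usage sketch (hypothetical caller, not part of the original
 * file): the helpers above are meant to be paired.  A write path reserves
 * data space for a range up front and, on failure, must release it with
 * the qgroup-aware variant so both the space_info accounting and the
 * per-inode qgroup reservation are undone.
 */
static inline int example_data_reserve_then_fail(struct inode *inode,
                                                 u64 start, u64 len)
{
        int ret;

        ret = btrfs_check_data_free_space(inode, start, len);
        if (ret < 0)
                return ret;

        /* ... imagine the write preparation failing at this point ... */
        btrfs_free_reserved_data_space(inode, start, len);
        return -EIO;    /* placeholder error for the sketch */
}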
4206
4207 static void force_metadata_allocation(struct btrfs_fs_info *info)
4208 {
4209         struct list_head *head = &info->space_info;
4210         struct btrfs_space_info *found;
4211
4212         rcu_read_lock();
4213         list_for_each_entry_rcu(found, head, list) {
4214                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4215                         found->force_alloc = CHUNK_ALLOC_FORCE;
4216         }
4217         rcu_read_unlock();
4218 }
4219
4220 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4221 {
4222         return (global->size << 1);
4223 }
4224
4225 static int should_alloc_chunk(struct btrfs_root *root,
4226                               struct btrfs_space_info *sinfo, int force)
4227 {
4228         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4229         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4230         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4231         u64 thresh;
4232
4233         if (force == CHUNK_ALLOC_FORCE)
4234                 return 1;
4235
4236         /*
4237          * We need to take into account the global rsv because for all intents
4238          * and purposes it's used space.  Don't worry about locking the
4239          * global_rsv, it doesn't change except when the transaction commits.
4240          */
4241         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4242                 num_allocated += calc_global_rsv_need_space(global_rsv);
4243
4244         /*
4245          * in limited mode, we want to have some free space up to
4246          * about 1% of the FS size.
4247          */
4248         if (force == CHUNK_ALLOC_LIMITED) {
4249                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4250                 thresh = max_t(u64, 64 * 1024 * 1024,
4251                                div_factor_fine(thresh, 1));
4252
4253                 if (num_bytes - num_allocated < thresh)
4254                         return 1;
4255         }
4256
4257         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4258                 return 0;
4259         return 1;
4260 }
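
/*
 * Editor's worked example for should_alloc_chunk() (illustrative numbers):
 * on a 1TiB filesystem, CHUNK_ALLOC_LIMITED computes
 *      thresh = max(64MiB, div_factor_fine(1TiB, 1)) ~= 10GiB (1%),
 * so a chunk is allocated as soon as less than ~1% of the space info is
 * free.  The final check allocates once usage crosses 80% of the space
 * info: with num_bytes == 100GiB and 79GiB allocated,
 *      79GiB + 2MiB < div_factor(100GiB, 8) == 80GiB,
 * so the function returns 0 and no chunk is allocated yet.
 */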
4261
4262 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4263 {
4264         u64 num_dev;
4265
4266         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4267                     BTRFS_BLOCK_GROUP_RAID0 |
4268                     BTRFS_BLOCK_GROUP_RAID5 |
4269                     BTRFS_BLOCK_GROUP_RAID6))
4270                 num_dev = root->fs_info->fs_devices->rw_devices;
4271         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4272                 num_dev = 2;
4273         else
4274                 num_dev = 1;    /* DUP or single */
4275
4276         return num_dev;
4277 }
4278
4279 /*
4280  * Reserve space in the system space_info needed for allocating a chunk or,
4281  * equally, for removing one; both operations update device items and add
4282  * or remove a chunk item in the chunk tree.
4283  */
4284 void check_system_chunk(struct btrfs_trans_handle *trans,
4285                         struct btrfs_root *root,
4286                         u64 type)
4287 {
4288         struct btrfs_space_info *info;
4289         u64 left;
4290         u64 thresh;
4291         int ret = 0;
4292         u64 num_devs;
4293
4294         /*
4295          * Needed because we can end up allocating a system chunk, and need
4296          * an atomic and race-free space reservation in the chunk block rsv.
4297          */
4298         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4299
4300         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4301         spin_lock(&info->lock);
4302         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4303                 info->bytes_reserved - info->bytes_readonly -
4304                 info->bytes_may_use;
4305         spin_unlock(&info->lock);
4306
4307         num_devs = get_profile_num_devs(root, type);
4308
4309         /* num_devs device items to update and 1 chunk item to add or remove */
4310         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4311                 btrfs_calc_trans_metadata_size(root, 1);
4312
4313         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4314                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4315                         left, thresh, type);
4316                 dump_space_info(info, 0, 0);
4317         }
4318
4319         if (left < thresh) {
4320                 u64 flags;
4321
4322                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4323                 /*
4324                  * Ignore failure to create system chunk. We might end up not
4325                  * needing it, as we might not need to COW all nodes/leafs from
4326                  * the paths we visit in the chunk tree (they were already COWed
4327                  * or created in the current transaction for example).
4328                  */
4329                 ret = btrfs_alloc_chunk(trans, root, flags);
4330         }
4331
4332         if (!ret) {
4333                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4334                                           &root->fs_info->chunk_block_rsv,
4335                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4336                 if (!ret)
4337                         trans->chunk_bytes_reserved += thresh;
4338         }
4339 }
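
/*
 * Editor's worked example for the threshold above, assuming the ctree.h
 * helpers of this era (btrfs_calc_trans_metadata_size(root, n) ==
 * nodesize * BTRFS_MAX_LEVEL * 2 * n, and the trunc variant the same
 * without the factor of 2): with a 16KiB nodesize and a RAID1 chunk
 * (num_devs == 2),
 *      thresh = 16KiB * 8 * 2 + 16KiB * 8 * 2 * 1 = 512KiB,
 * i.e. half a MiB of SYSTEM space must be free (or reservable) before the
 * chunk operation can proceed without allocating a new system chunk.
 */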
4340
4341 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4342                           struct btrfs_root *extent_root, u64 flags, int force)
4343 {
4344         struct btrfs_space_info *space_info;
4345         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4346         int wait_for_alloc = 0;
4347         int ret = 0;
4348
4349         /* Don't re-enter if we're already allocating a chunk */
4350         if (trans->allocating_chunk)
4351                 return -ENOSPC;
4352
4353         space_info = __find_space_info(extent_root->fs_info, flags);
4354         if (!space_info) {
4355                 ret = update_space_info(extent_root->fs_info, flags,
4356                                         0, 0, &space_info);
4357                 BUG_ON(ret); /* -ENOMEM */
4358         }
4359         BUG_ON(!space_info); /* Logic error */
4360
4361 again:
4362         spin_lock(&space_info->lock);
4363         if (force < space_info->force_alloc)
4364                 force = space_info->force_alloc;
4365         if (space_info->full) {
4366                 if (should_alloc_chunk(extent_root, space_info, force))
4367                         ret = -ENOSPC;
4368                 else
4369                         ret = 0;
4370                 spin_unlock(&space_info->lock);
4371                 return ret;
4372         }
4373
4374         if (!should_alloc_chunk(extent_root, space_info, force)) {
4375                 spin_unlock(&space_info->lock);
4376                 return 0;
4377         } else if (space_info->chunk_alloc) {
4378                 wait_for_alloc = 1;
4379         } else {
4380                 space_info->chunk_alloc = 1;
4381         }
4382
4383         spin_unlock(&space_info->lock);
4384
4385         mutex_lock(&fs_info->chunk_mutex);
4386
4387         /*
4388          * The chunk_mutex is held throughout the entirety of a chunk
4389          * allocation, so once we've acquired the chunk_mutex we know that the
4390          * other guy is done and we need to recheck and see if we should
4391          * allocate.
4392          */
4393         if (wait_for_alloc) {
4394                 mutex_unlock(&fs_info->chunk_mutex);
4395                 wait_for_alloc = 0;
4396                 goto again;
4397         }
4398
4399         trans->allocating_chunk = true;
4400
4401         /*
4402          * If we have mixed data/metadata chunks we want to make sure we keep
4403          * allocating mixed chunks instead of individual chunks.
4404          */
4405         if (btrfs_mixed_space_info(space_info))
4406                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4407
4408         /*
4409          * if we're doing a data chunk, go ahead and make sure that
4410          * we keep a reasonable number of metadata chunks allocated in the
4411          * FS as well.
4412          */
4413         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4414                 fs_info->data_chunk_allocations++;
4415                 if (!(fs_info->data_chunk_allocations %
4416                       fs_info->metadata_ratio))
4417                         force_metadata_allocation(fs_info);
4418         }
4419
4420         /*
4421          * Check if we have enough space in SYSTEM chunk because we may need
4422          * to update devices.
4423          */
4424         check_system_chunk(trans, extent_root, flags);
4425
4426         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4427         trans->allocating_chunk = false;
4428
4429         spin_lock(&space_info->lock);
4430         if (ret < 0 && ret != -ENOSPC)
4431                 goto out;
4432         if (ret)
4433                 space_info->full = 1;
4434         else
4435                 ret = 1;
4436
4437         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4438 out:
4439         space_info->chunk_alloc = 0;
4440         spin_unlock(&space_info->lock);
4441         mutex_unlock(&fs_info->chunk_mutex);
4442         /*
4443          * When we allocate a new chunk we reserve space in the chunk block
4444          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4445          * add new nodes/leafs to it if we end up needing to do it when
4446          * inserting the chunk item and updating device items as part of the
4447          * second phase of chunk allocation, performed by
4448          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4449          * large number of new block groups to create in our transaction
4450          * handle's new_bgs list to avoid exhausting the chunk block reserve
4451          * in extreme cases - like having a single transaction create many new
4452          * block groups when starting to write out the free space caches of all
4453          * the block groups that were made dirty during the lifetime of the
4454          * transaction.
4455          */
4456         if (trans->can_flush_pending_bgs &&
4457             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4458                 btrfs_create_pending_block_groups(trans, trans->root);
4459                 btrfs_trans_release_chunk_metadata(trans);
4460         }
4461         return ret;
4462 }
4463
4464 static int can_overcommit(struct btrfs_root *root,
4465                           struct btrfs_space_info *space_info, u64 bytes,
4466                           enum btrfs_reserve_flush_enum flush)
4467 {
4468         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4469         u64 profile = btrfs_get_alloc_profile(root, 0);
4470         u64 space_size;
4471         u64 avail;
4472         u64 used;
4473
4474         used = space_info->bytes_used + space_info->bytes_reserved +
4475                 space_info->bytes_pinned + space_info->bytes_readonly;
4476
4477         /*
4478          * We only want to allow over committing if we have lots of actual space
4479          * free, but if we don't have enough space to handle the global reserve
4480          * space then we could end up having a real enospc problem when trying
4481          * to allocate a chunk or some other such important allocation.
4482          */
4483         spin_lock(&global_rsv->lock);
4484         space_size = calc_global_rsv_need_space(global_rsv);
4485         spin_unlock(&global_rsv->lock);
4486         if (used + space_size >= space_info->total_bytes)
4487                 return 0;
4488
4489         used += space_info->bytes_may_use;
4490
4491         spin_lock(&root->fs_info->free_chunk_lock);
4492         avail = root->fs_info->free_chunk_space;
4493         spin_unlock(&root->fs_info->free_chunk_lock);
4494
4495         /*
4496          * If we have dup, raid1 or raid10 then only half of the free
4497          * space is actually usable.  For raid56, the space info used
4498          * doesn't include the parity drive, so we don't have to
4499          * change the math.
4500          */
4501         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4502                        BTRFS_BLOCK_GROUP_RAID1 |
4503                        BTRFS_BLOCK_GROUP_RAID10))
4504                 avail >>= 1;
4505
4506         /*
4507          * If we aren't flushing all things, let us overcommit up to
4508          * half of the space.  If we can flush, don't let us overcommit
4509          * too much: only up to 1/8 of the space.
4510          */
4511         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4512                 avail >>= 3;
4513         else
4514                 avail >>= 1;
4515
4516         if (used + bytes < space_info->total_bytes + avail)
4517                 return 1;
4518         return 0;
4519 }
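
/*
 * Editor's worked example for can_overcommit() (illustrative numbers,
 * assuming the global reserve check above passes): with 20GiB of
 * unallocated device space and a RAID1 profile, avail is halved to 10GiB.
 * A BTRFS_RESERVE_FLUSH_ALL caller may then overcommit by a further 1/8 of
 * that (1.25GiB), while any other caller may overcommit by half (5GiB).
 * So with total_bytes == 10GiB, used == 9.5GiB and bytes == 2GiB:
 *      FLUSH_ALL: 9.5 + 2 >= 10 + 1.25 -> overcommit refused
 *      NO_FLUSH:  9.5 + 2 <  10 + 5    -> overcommit allowed
 */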
4520
4521 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4522                                          unsigned long nr_pages, int nr_items)
4523 {
4524         struct super_block *sb = root->fs_info->sb;
4525
4526         if (down_read_trylock(&sb->s_umount)) {
4527                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4528                 up_read(&sb->s_umount);
4529         } else {
4530                 /*
4531                  * We needn't worry about the filesystem going from r/w to
4532                  * r/o even though we don't acquire the ->s_umount mutex,
4533                  * because the filesystem guarantees that the delalloc
4534                  * inode list is empty once it is read-only (all dirty
4535                  * pages have been written to disk).
4536                  */
4537                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4538                 if (!current->journal_info)
4539                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4540         }
4541 }
4542
4543 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4544 {
4545         u64 bytes;
4546         int nr;
4547
4548         bytes = btrfs_calc_trans_metadata_size(root, 1);
4549         nr = (int)div64_u64(to_reclaim, bytes);
4550         if (!nr)
4551                 nr = 1;
4552         return nr;
4553 }
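
/*
 * Editor's worked example: with a 16KiB nodesize,
 * btrfs_calc_trans_metadata_size(root, 1) is 256KiB (assuming the usual
 * nodesize * BTRFS_MAX_LEVEL * 2 definition), so asking to reclaim 2MiB
 * maps to nr == 8 items; shrink_delalloc() below then scales that back to
 * bytes as 8 * EXTENT_SIZE_PER_ITEM == 2MiB of delalloc to flush.
 */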
4554
4555 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4556
4557 /*
4558  * shrink metadata reservation for delalloc
4559  */
4560 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4561                             bool wait_ordered)
4562 {
4563         struct btrfs_block_rsv *block_rsv;
4564         struct btrfs_space_info *space_info;
4565         struct btrfs_trans_handle *trans;
4566         u64 delalloc_bytes;
4567         u64 max_reclaim;
4568         long time_left;
4569         unsigned long nr_pages;
4570         int loops;
4571         int items;
4572         enum btrfs_reserve_flush_enum flush;
4573
4574         /* Calc the number of items we need to flush for this space reservation */
4575         items = calc_reclaim_items_nr(root, to_reclaim);
4576         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4577
4578         trans = (struct btrfs_trans_handle *)current->journal_info;
4579         block_rsv = &root->fs_info->delalloc_block_rsv;
4580         space_info = block_rsv->space_info;
4581
4582         delalloc_bytes = percpu_counter_sum_positive(
4583                                                 &root->fs_info->delalloc_bytes);
4584         if (delalloc_bytes == 0) {
4585                 if (trans)
4586                         return;
4587                 if (wait_ordered)
4588                         btrfs_wait_ordered_roots(root->fs_info, items);
4589                 return;
4590         }
4591
4592         loops = 0;
4593         while (delalloc_bytes && loops < 3) {
4594                 max_reclaim = min(delalloc_bytes, to_reclaim);
4595                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4596                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4597                 /*
4598                  * We need to wait for the async pages to actually start before
4599                  * we do anything.
4600                  */
4601                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4602                 if (!max_reclaim)
4603                         goto skip_async;
4604
4605                 if (max_reclaim <= nr_pages)
4606                         max_reclaim = 0;
4607                 else
4608                         max_reclaim -= nr_pages;
4609
4610                 wait_event(root->fs_info->async_submit_wait,
4611                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4612                            (int)max_reclaim);
4613 skip_async:
4614                 if (!trans)
4615                         flush = BTRFS_RESERVE_FLUSH_ALL;
4616                 else
4617                         flush = BTRFS_RESERVE_NO_FLUSH;
4618                 spin_lock(&space_info->lock);
4619                 if (can_overcommit(root, space_info, orig, flush)) {
4620                         spin_unlock(&space_info->lock);
4621                         break;
4622                 }
4623                 spin_unlock(&space_info->lock);
4624
4625                 loops++;
4626                 if (wait_ordered && !trans) {
4627                         btrfs_wait_ordered_roots(root->fs_info, items);
4628                 } else {
4629                         time_left = schedule_timeout_killable(1);
4630                         if (time_left)
4631                                 break;
4632                 }
4633                 delalloc_bytes = percpu_counter_sum_positive(
4634                                                 &root->fs_info->delalloc_bytes);
4635         }
4636 }
4637
4638 /**
4639  * may_commit_transaction - possibly commit the transaction if it's OK to
4640  * @root - the root we're allocating for
4641  * @bytes - the number of bytes we want to reserve
4642  * @force - force the commit
4643  *
4644  * This will check to make sure that committing the transaction will actually
4645  * get us somewhere and then commit the transaction if it does.  Otherwise it
4646  * will return -ENOSPC.
4647  */
4648 static int may_commit_transaction(struct btrfs_root *root,
4649                                   struct btrfs_space_info *space_info,
4650                                   u64 bytes, int force)
4651 {
4652         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4653         struct btrfs_trans_handle *trans;
4654
4655         trans = (struct btrfs_trans_handle *)current->journal_info;
4656         if (trans)
4657                 return -EAGAIN;
4658
4659         if (force)
4660                 goto commit;
4661
4662         /* See if there is enough pinned space to make this reservation */
4663         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4664                                    bytes) >= 0)
4665                 goto commit;
4666
4667         /*
4668          * See if there is some space in the delayed insertion reservation for
4669          * this reservation.
4670          */
4671         if (space_info != delayed_rsv->space_info)
4672                 return -ENOSPC;
4673
4674         spin_lock(&delayed_rsv->lock);
4675         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4676                                    bytes - delayed_rsv->size) >= 0) {
4677                 spin_unlock(&delayed_rsv->lock);
4678                 return -ENOSPC;
4679         }
4680         spin_unlock(&delayed_rsv->lock);
4681
4682 commit:
4683         trans = btrfs_join_transaction(root);
4684         if (IS_ERR(trans))
4685                 return -ENOSPC;
4686
4687         return btrfs_commit_transaction(trans, root);
4688 }
4689
4690 enum flush_state {
4691         FLUSH_DELAYED_ITEMS_NR  =       1,
4692         FLUSH_DELAYED_ITEMS     =       2,
4693         FLUSH_DELALLOC          =       3,
4694         FLUSH_DELALLOC_WAIT     =       4,
4695         ALLOC_CHUNK             =       5,
4696         COMMIT_TRANS            =       6,
4697 };
4698
4699 static int flush_space(struct btrfs_root *root,
4700                        struct btrfs_space_info *space_info, u64 num_bytes,
4701                        u64 orig_bytes, int state)
4702 {
4703         struct btrfs_trans_handle *trans;
4704         int nr;
4705         int ret = 0;
4706
4707         switch (state) {
4708         case FLUSH_DELAYED_ITEMS_NR:
4709         case FLUSH_DELAYED_ITEMS:
4710                 if (state == FLUSH_DELAYED_ITEMS_NR)
4711                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4712                 else
4713                         nr = -1;
4714
4715                 trans = btrfs_join_transaction(root);
4716                 if (IS_ERR(trans)) {
4717                         ret = PTR_ERR(trans);
4718                         break;
4719                 }
4720                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4721                 btrfs_end_transaction(trans, root);
4722                 break;
4723         case FLUSH_DELALLOC:
4724         case FLUSH_DELALLOC_WAIT:
4725                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4726                                 state == FLUSH_DELALLOC_WAIT);
4727                 break;
4728         case ALLOC_CHUNK:
4729                 trans = btrfs_join_transaction(root);
4730                 if (IS_ERR(trans)) {
4731                         ret = PTR_ERR(trans);
4732                         break;
4733                 }
4734                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4735                                      btrfs_get_alloc_profile(root, 0),
4736                                      CHUNK_ALLOC_NO_FORCE);
4737                 btrfs_end_transaction(trans, root);
4738                 if (ret == -ENOSPC)
4739                         ret = 0;
4740                 break;
4741         case COMMIT_TRANS:
4742                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4743                 break;
4744         default:
4745                 ret = -ENOSPC;
4746                 break;
4747         }
4748
4749         return ret;
4750 }
4751
4752 static inline u64
4753 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4754                                  struct btrfs_space_info *space_info)
4755 {
4756         u64 used;
4757         u64 expected;
4758         u64 to_reclaim;
4759
4760         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4761                                 16 * 1024 * 1024);
4762         spin_lock(&space_info->lock);
4763         if (can_overcommit(root, space_info, to_reclaim,
4764                            BTRFS_RESERVE_FLUSH_ALL)) {
4765                 to_reclaim = 0;
4766                 goto out;
4767         }
4768
4769         used = space_info->bytes_used + space_info->bytes_reserved +
4770                space_info->bytes_pinned + space_info->bytes_readonly +
4771                space_info->bytes_may_use;
4772         if (can_overcommit(root, space_info, 1024 * 1024,
4773                            BTRFS_RESERVE_FLUSH_ALL))
4774                 expected = div_factor_fine(space_info->total_bytes, 95);
4775         else
4776                 expected = div_factor_fine(space_info->total_bytes, 90);
4777
4778         if (used > expected)
4779                 to_reclaim = used - expected;
4780         else
4781                 to_reclaim = 0;
4782         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4783                                      space_info->bytes_reserved);
4784 out:
4785         spin_unlock(&space_info->lock);
4786
4787         return to_reclaim;
4788 }
4789
4790 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4791                                         struct btrfs_fs_info *fs_info, u64 used)
4792 {
4793         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4794
4795         /* If we're just plain full then async reclaim just slows us down. */
4796         if (space_info->bytes_used >= thresh)
4797                 return 0;
4798
4799         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4800                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4801 }
4802
4803 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4804                                        struct btrfs_fs_info *fs_info,
4805                                        int flush_state)
4806 {
4807         u64 used;
4808
4809         spin_lock(&space_info->lock);
4810         /*
4811          * We've run out of space and flush_space has not freed any, so
4812          * don't bother doing async reclaim.
4813          */
4814         if (flush_state > COMMIT_TRANS && space_info->full) {
4815                 spin_unlock(&space_info->lock);
4816                 return 0;
4817         }
4818
4819         used = space_info->bytes_used + space_info->bytes_reserved +
4820                space_info->bytes_pinned + space_info->bytes_readonly +
4821                space_info->bytes_may_use;
4822         if (need_do_async_reclaim(space_info, fs_info, used)) {
4823                 spin_unlock(&space_info->lock);
4824                 return 1;
4825         }
4826         spin_unlock(&space_info->lock);
4827
4828         return 0;
4829 }
4830
4831 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4832 {
4833         struct btrfs_fs_info *fs_info;
4834         struct btrfs_space_info *space_info;
4835         u64 to_reclaim;
4836         int flush_state;
4837
4838         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4839         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4840
4841         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4842                                                       space_info);
4843         if (!to_reclaim)
4844                 return;
4845
4846         flush_state = FLUSH_DELAYED_ITEMS_NR;
4847         do {
4848                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4849                             to_reclaim, flush_state);
4850                 flush_state++;
4851                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4852                                                  flush_state))
4853                         return;
4854         } while (flush_state < COMMIT_TRANS);
4855 }
4856
4857 void btrfs_init_async_reclaim_work(struct work_struct *work)
4858 {
4859         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4860 }
4861
4862 /**
4863  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4864  * @root - the root we're allocating for
4865  * @block_rsv - the block_rsv we're allocating for
4866  * @orig_bytes - the number of bytes we want
4867  * @flush - whether or not we can flush to make our reservation
4868  *
4869  * This will reserve orig_bytes number of bytes from the space info associated
4870  * with the block_rsv.  If there is not enough space it will make an attempt to
4871  * flush out space to make room.  It will do this by flushing delalloc if
4872  * possible or committing the transaction.  If flush is 0 then no attempts to
4873  * regain reservations will be made and this will fail if there is not enough
4874  * space already.
4875  */
4876 static int reserve_metadata_bytes(struct btrfs_root *root,
4877                                   struct btrfs_block_rsv *block_rsv,
4878                                   u64 orig_bytes,
4879                                   enum btrfs_reserve_flush_enum flush)
4880 {
4881         struct btrfs_space_info *space_info = block_rsv->space_info;
4882         u64 used;
4883         u64 num_bytes = orig_bytes;
4884         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4885         int ret = 0;
4886         bool flushing = false;
4887
4888 again:
4889         ret = 0;
4890         spin_lock(&space_info->lock);
4891         /*
4892          * We only want to wait if somebody other than us is flushing and we
4893          * are actually allowed to flush all things.
4894          */
4895         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4896                space_info->flush) {
4897                 spin_unlock(&space_info->lock);
4898                 /*
4899                  * If we have a trans handle we can't wait because the flusher
4900                  * may have to commit the transaction, which would mean we would
4901                  * deadlock since we are waiting for the flusher to finish, but
4902                  * hold the current transaction open.
4903                  */
4904                 if (current->journal_info)
4905                         return -EAGAIN;
4906                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4907                 /* Must have been killed, return */
4908                 if (ret)
4909                         return -EINTR;
4910
4911                 spin_lock(&space_info->lock);
4912         }
4913
4914         ret = -ENOSPC;
4915         used = space_info->bytes_used + space_info->bytes_reserved +
4916                 space_info->bytes_pinned + space_info->bytes_readonly +
4917                 space_info->bytes_may_use;
4918
4919         /*
4920          * The idea here is that if we haven't already over-reserved the
4921          * block group, we can go ahead and save our reservation first and
4922          * then start flushing if we need to.  Otherwise, if we've already
4923          * overcommitted, let's start flushing stuff first and then come
4924          * back and try to make our reservation.
4925          */
4926         if (used <= space_info->total_bytes) {
4927                 if (used + orig_bytes <= space_info->total_bytes) {
4928                         space_info->bytes_may_use += orig_bytes;
4929                         trace_btrfs_space_reservation(root->fs_info,
4930                                 "space_info", space_info->flags, orig_bytes, 1);
4931                         ret = 0;
4932                 } else {
4933                         /*
4934                          * Ok, set num_bytes to orig_bytes since we aren't
4935                          * overcommitted; this way we only try to reclaim
4936                          * what we need.
4937                          */
4938                         num_bytes = orig_bytes;
4939                 }
4940         } else {
4941                 /*
4942                  * Ok, we're overcommitted: set num_bytes to the
4943                  * overcommitted amount plus the amount of bytes that we
4944                  * need for this reservation.
4945                  */
4946                 num_bytes = used - space_info->total_bytes +
4947                         (orig_bytes * 2);
4948         }
4949
4950         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4951                 space_info->bytes_may_use += orig_bytes;
4952                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4953                                               space_info->flags, orig_bytes,
4954                                               1);
4955                 ret = 0;
4956         }
4957
4958         /*
4959          * Couldn't make our reservation, save our place so while we're trying
4960          * to reclaim space we can actually use it instead of somebody else
4961          * stealing it from us.
4962          *
4963          * We make the other tasks wait for the flush only when we can flush
4964          * all things.
4965          */
4966         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4967                 flushing = true;
4968                 space_info->flush = 1;
4969         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4970                 used += orig_bytes;
4971                 /*
4972                  * We will do the space reservation dance during log replay,
4973                  * which means we won't have fs_info->fs_root set, so don't do
4974                  * the async reclaim as we will panic.
4975                  */
4976                 if (!root->fs_info->log_root_recovering &&
4977                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4978                     !work_busy(&root->fs_info->async_reclaim_work))
4979                         queue_work(system_unbound_wq,
4980                                    &root->fs_info->async_reclaim_work);
4981         }
4982         spin_unlock(&space_info->lock);
4983
4984         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4985                 goto out;
4986
4987         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4988                           flush_state);
4989         flush_state++;
4990
4991         /*
4992          * If we are FLUSH_LIMIT, we cannot flush delalloc or a deadlock
4993          * could happen, so skip the delalloc flush states.
4994          */
4995         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4996             (flush_state == FLUSH_DELALLOC ||
4997              flush_state == FLUSH_DELALLOC_WAIT))
4998                 flush_state = ALLOC_CHUNK;
4999
5000         if (!ret)
5001                 goto again;
5002         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5003                  flush_state < COMMIT_TRANS)
5004                 goto again;
5005         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
5006                  flush_state <= COMMIT_TRANS)
5007                 goto again;
5008
5009 out:
5010         if (ret == -ENOSPC &&
5011             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5012                 struct btrfs_block_rsv *global_rsv =
5013                         &root->fs_info->global_block_rsv;
5014
5015                 if (block_rsv != global_rsv &&
5016                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5017                         ret = 0;
5018         }
5019         if (ret == -ENOSPC)
5020                 trace_btrfs_space_reservation(root->fs_info,
5021                                               "space_info:enospc",
5022                                               space_info->flags, orig_bytes, 1);
5023         if (flushing) {
5024                 spin_lock(&space_info->lock);
5025                 space_info->flush = 0;
5026                 wake_up_all(&space_info->wait);
5027                 spin_unlock(&space_info->lock);
5028         }
5029         return ret;
5030 }
5031
5032 static struct btrfs_block_rsv *get_block_rsv(
5033                                         const struct btrfs_trans_handle *trans,
5034                                         const struct btrfs_root *root)
5035 {
5036         struct btrfs_block_rsv *block_rsv = NULL;
5037
5038         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5039             (root == root->fs_info->csum_root && trans->adding_csums) ||
5040              (root == root->fs_info->uuid_root))
5041                 block_rsv = trans->block_rsv;
5042
5043         if (!block_rsv)
5044                 block_rsv = root->block_rsv;
5045
5046         if (!block_rsv)
5047                 block_rsv = &root->fs_info->empty_block_rsv;
5048
5049         return block_rsv;
5050 }
5051
5052 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5053                                u64 num_bytes)
5054 {
5055         int ret = -ENOSPC;
5056         spin_lock(&block_rsv->lock);
5057         if (block_rsv->reserved >= num_bytes) {
5058                 block_rsv->reserved -= num_bytes;
5059                 if (block_rsv->reserved < block_rsv->size)
5060                         block_rsv->full = 0;
5061                 ret = 0;
5062         }
5063         spin_unlock(&block_rsv->lock);
5064         return ret;
5065 }
5066
5067 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5068                                 u64 num_bytes, int update_size)
5069 {
5070         spin_lock(&block_rsv->lock);
5071         block_rsv->reserved += num_bytes;
5072         if (update_size)
5073                 block_rsv->size += num_bytes;
5074         else if (block_rsv->reserved >= block_rsv->size)
5075                 block_rsv->full = 1;
5076         spin_unlock(&block_rsv->lock);
5077 }
5078
5079 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5080                              struct btrfs_block_rsv *dest, u64 num_bytes,
5081                              int min_factor)
5082 {
5083         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5084         u64 min_bytes;
5085
5086         if (global_rsv->space_info != dest->space_info)
5087                 return -ENOSPC;
5088
5089         spin_lock(&global_rsv->lock);
5090         min_bytes = div_factor(global_rsv->size, min_factor);
5091         if (global_rsv->reserved < min_bytes + num_bytes) {
5092                 spin_unlock(&global_rsv->lock);
5093                 return -ENOSPC;
5094         }
5095         global_rsv->reserved -= num_bytes;
5096         if (global_rsv->reserved < global_rsv->size)
5097                 global_rsv->full = 0;
5098         spin_unlock(&global_rsv->lock);
5099
5100         block_rsv_add_bytes(dest, num_bytes, 1);
5101         return 0;
5102 }
5103
5104 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5105                                     struct btrfs_block_rsv *block_rsv,
5106                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5107 {
5108         struct btrfs_space_info *space_info = block_rsv->space_info;
5109
5110         spin_lock(&block_rsv->lock);
5111         if (num_bytes == (u64)-1)
5112                 num_bytes = block_rsv->size;
5113         block_rsv->size -= num_bytes;
5114         if (block_rsv->reserved >= block_rsv->size) {
5115                 num_bytes = block_rsv->reserved - block_rsv->size;
5116                 block_rsv->reserved = block_rsv->size;
5117                 block_rsv->full = 1;
5118         } else {
5119                 num_bytes = 0;
5120         }
5121         spin_unlock(&block_rsv->lock);
5122
5123         if (num_bytes > 0) {
5124                 if (dest) {
5125                         spin_lock(&dest->lock);
5126                         if (!dest->full) {
5127                                 u64 bytes_to_add;
5128
5129                                 bytes_to_add = dest->size - dest->reserved;
5130                                 bytes_to_add = min(num_bytes, bytes_to_add);
5131                                 dest->reserved += bytes_to_add;
5132                                 if (dest->reserved >= dest->size)
5133                                         dest->full = 1;
5134                                 num_bytes -= bytes_to_add;
5135                         }
5136                         spin_unlock(&dest->lock);
5137                 }
5138                 if (num_bytes) {
5139                         spin_lock(&space_info->lock);
5140                         space_info->bytes_may_use -= num_bytes;
5141                         trace_btrfs_space_reservation(fs_info, "space_info",
5142                                         space_info->flags, num_bytes, 0);
5143                         spin_unlock(&space_info->lock);
5144                 }
5145         }
5146 }
5147
5148 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5149                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5150 {
5151         int ret;
5152
5153         ret = block_rsv_use_bytes(src, num_bytes);
5154         if (ret)
5155                 return ret;
5156
5157         block_rsv_add_bytes(dst, num_bytes, 1);
5158         return 0;
5159 }
5160
5161 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5162 {
5163         memset(rsv, 0, sizeof(*rsv));
5164         spin_lock_init(&rsv->lock);
5165         rsv->type = type;
5166 }
5167
5168 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5169                                               unsigned short type)
5170 {
5171         struct btrfs_block_rsv *block_rsv;
5172         struct btrfs_fs_info *fs_info = root->fs_info;
5173
5174         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5175         if (!block_rsv)
5176                 return NULL;
5177
5178         btrfs_init_block_rsv(block_rsv, type);
5179         block_rsv->space_info = __find_space_info(fs_info,
5180                                                   BTRFS_BLOCK_GROUP_METADATA);
5181         return block_rsv;
5182 }
5183
5184 void btrfs_free_block_rsv(struct btrfs_root *root,
5185                           struct btrfs_block_rsv *rsv)
5186 {
5187         if (!rsv)
5188                 return;
5189         btrfs_block_rsv_release(root, rsv, (u64)-1);
5190         kfree(rsv);
5191 }
5192
5193 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5194 {
5195         kfree(rsv);
5196 }
5197
5198 int btrfs_block_rsv_add(struct btrfs_root *root,
5199                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5200                         enum btrfs_reserve_flush_enum flush)
5201 {
5202         int ret;
5203
5204         if (num_bytes == 0)
5205                 return 0;
5206
5207         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5208         if (!ret) {
5209                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5210                 return 0;
5211         }
5212
5213         return ret;
5214 }
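
/*
 * Editor's lifecycle sketch (hypothetical, never called): a short-lived
 * reservation is typically allocated with the TEMP type, filled through
 * reserve_metadata_bytes() via btrfs_block_rsv_add(), and then released so
 * any unused bytes flow back to the global reserve or the space info.
 */
static inline int example_block_rsv_lifecycle(struct btrfs_root *root)
{
        struct btrfs_block_rsv *rsv;
        int ret;

        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;

        ret = btrfs_block_rsv_add(root, rsv, 1024 * 1024,
                                  BTRFS_RESERVE_FLUSH_ALL);
        if (!ret) {
                /* ... use the reserved metadata space ... */
        }

        /* releases rsv->size back before freeing the structure */
        btrfs_free_block_rsv(root, rsv);
        return ret;
}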
5215
5216 int btrfs_block_rsv_check(struct btrfs_root *root,
5217                           struct btrfs_block_rsv *block_rsv, int min_factor)
5218 {
5219         u64 num_bytes = 0;
5220         int ret = -ENOSPC;
5221
5222         if (!block_rsv)
5223                 return 0;
5224
5225         spin_lock(&block_rsv->lock);
5226         num_bytes = div_factor(block_rsv->size, min_factor);
5227         if (block_rsv->reserved >= num_bytes)
5228                 ret = 0;
5229         spin_unlock(&block_rsv->lock);
5230
5231         return ret;
5232 }
5233
5234 int btrfs_block_rsv_refill(struct btrfs_root *root,
5235                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5236                            enum btrfs_reserve_flush_enum flush)
5237 {
5238         u64 num_bytes = 0;
5239         int ret = -ENOSPC;
5240
5241         if (!block_rsv)
5242                 return 0;
5243
5244         spin_lock(&block_rsv->lock);
5245         num_bytes = min_reserved;
5246         if (block_rsv->reserved >= num_bytes)
5247                 ret = 0;
5248         else
5249                 num_bytes -= block_rsv->reserved;
5250         spin_unlock(&block_rsv->lock);
5251
5252         if (!ret)
5253                 return 0;
5254
5255         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5256         if (!ret) {
5257                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5258                 return 0;
5259         }
5260
5261         return ret;
5262 }
5263
5264 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5265                             struct btrfs_block_rsv *dst_rsv,
5266                             u64 num_bytes)
5267 {
5268         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5269 }
5270
5271 void btrfs_block_rsv_release(struct btrfs_root *root,
5272                              struct btrfs_block_rsv *block_rsv,
5273                              u64 num_bytes)
5274 {
5275         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5276         if (global_rsv == block_rsv ||
5277             block_rsv->space_info != global_rsv->space_info)
5278                 global_rsv = NULL;
5279         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5280                                 num_bytes);
5281 }
5282
5283 /*
5284  * Helper to calculate the size of the global block reservation.
5285  * The desired value is the sum of the space used by the extent tree,
5286  * the checksum tree and the root tree.
5287  */
5288 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5289 {
5290         struct btrfs_space_info *sinfo;
5291         u64 num_bytes;
5292         u64 meta_used;
5293         u64 data_used;
5294         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5295
5296         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5297         spin_lock(&sinfo->lock);
5298         data_used = sinfo->bytes_used;
5299         spin_unlock(&sinfo->lock);
5300
5301         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5302         spin_lock(&sinfo->lock);
5303         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5304                 data_used = 0;
5305         meta_used = sinfo->bytes_used;
5306         spin_unlock(&sinfo->lock);
5307
5308         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5309                     csum_size * 2;
5310         num_bytes += div_u64(data_used + meta_used, 50);
5311
5312         if (num_bytes * 3 > meta_used)
5313                 num_bytes = div_u64(meta_used, 3);
5314
5315         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5316 }
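
/*
 * Editor's worked example (illustrative numbers, crc32c checksums): with
 * 4KiB blocks (s_blocksize_bits == 12), csum_size == 4, data_used == 100GiB
 * and meta_used == 2GiB:
 *      num_bytes  = (100GiB >> 12) * 4 * 2              = 200MiB
 *      num_bytes += div_u64(100GiB + 2GiB, 50)         ~= 2.04GiB -> ~2.24GiB
 * Since 3 * num_bytes exceeds meta_used, the result is capped at
 * meta_used / 3 ~= 683MiB, then rounded up to a multiple of nodesize << 10
 * (16MiB for a 16KiB nodesize), giving 688MiB.
 */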
5317
5318 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5319 {
5320         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5321         struct btrfs_space_info *sinfo = block_rsv->space_info;
5322         u64 num_bytes;
5323
5324         num_bytes = calc_global_metadata_size(fs_info);
5325
5326         spin_lock(&sinfo->lock);
5327         spin_lock(&block_rsv->lock);
5328
5329         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5330
5331         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5332                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5333                     sinfo->bytes_may_use;
5334
5335         if (sinfo->total_bytes > num_bytes) {
5336                 num_bytes = sinfo->total_bytes - num_bytes;
5337                 block_rsv->reserved += num_bytes;
5338                 sinfo->bytes_may_use += num_bytes;
5339                 trace_btrfs_space_reservation(fs_info, "space_info",
5340                                       sinfo->flags, num_bytes, 1);
5341         }
5342
5343         if (block_rsv->reserved >= block_rsv->size) {
5344                 num_bytes = block_rsv->reserved - block_rsv->size;
5345                 sinfo->bytes_may_use -= num_bytes;
5346                 trace_btrfs_space_reservation(fs_info, "space_info",
5347                                       sinfo->flags, num_bytes, 0);
5348                 block_rsv->reserved = block_rsv->size;
5349                 block_rsv->full = 1;
5350         }
5351
5352         spin_unlock(&block_rsv->lock);
5353         spin_unlock(&sinfo->lock);
5354 }
5355
5356 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5357 {
5358         struct btrfs_space_info *space_info;
5359
5360         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5361         fs_info->chunk_block_rsv.space_info = space_info;
5362
5363         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5364         fs_info->global_block_rsv.space_info = space_info;
5365         fs_info->delalloc_block_rsv.space_info = space_info;
5366         fs_info->trans_block_rsv.space_info = space_info;
5367         fs_info->empty_block_rsv.space_info = space_info;
5368         fs_info->delayed_block_rsv.space_info = space_info;
5369
5370         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5371         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5372         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5373         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5374         if (fs_info->quota_root)
5375                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5376         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5377
5378         update_global_block_rsv(fs_info);
5379 }
5380
5381 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5382 {
5383         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5384                                 (u64)-1);
5385         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5386         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5387         WARN_ON(fs_info->trans_block_rsv.size > 0);
5388         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5389         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5390         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5391         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5392         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5393 }
5394
5395 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5396                                   struct btrfs_root *root)
5397 {
5398         if (!trans->block_rsv)
5399                 return;
5400
5401         if (!trans->bytes_reserved)
5402                 return;
5403
5404         trace_btrfs_space_reservation(root->fs_info, "transaction",
5405                                       trans->transid, trans->bytes_reserved, 0);
5406         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5407         trans->bytes_reserved = 0;
5408 }
5409
5410 /*
5411  * To be called after all the new block groups attached to the transaction
5412  * handle have been created (btrfs_create_pending_block_groups()).
5413  */
5414 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5415 {
5416         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5417
5418         if (!trans->chunk_bytes_reserved)
5419                 return;
5420
5421         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5422
5423         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5424                                 trans->chunk_bytes_reserved);
5425         trans->chunk_bytes_reserved = 0;
5426 }
5427
5428 /* Can only return 0 or -ENOSPC */
5429 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5430                                   struct inode *inode)
5431 {
5432         struct btrfs_root *root = BTRFS_I(inode)->root;
5433         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5434         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5435
5436         /*
5437          * We need to hold space in order to delete our orphan item once we've
5438          * added it, so take the reservation here and release it later, when
5439          * we are truly done with the orphan item.
5440          */
5441         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5442         trace_btrfs_space_reservation(root->fs_info, "orphan",
5443                                       btrfs_ino(inode), num_bytes, 1);
5444         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5445 }
5446
5447 void btrfs_orphan_release_metadata(struct inode *inode)
5448 {
5449         struct btrfs_root *root = BTRFS_I(inode)->root;
5450         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5451         trace_btrfs_space_reservation(root->fs_info, "orphan",
5452                                       btrfs_ino(inode), num_bytes, 0);
5453         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5454 }
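
/*
 * Minimal usage sketch for the orphan reservation pair above, assuming a
 * caller that already holds a transaction handle (illustrative, not a real
 * call site):
 *
 *	ret = btrfs_orphan_reserve_metadata(trans, inode);
 *	if (ret)	/* -ENOSPC is the only possible error */
 *		return ret;
 *	...add, and eventually delete, the orphan item...
 *	btrfs_orphan_release_metadata(inode);
 */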
5455
5456 /*
5457  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5458  * root: the root of the parent directory
5459  * rsv: block reservation
5460  * items: the number of items that we need to reserve
5461  * qgroup_reserved: used to return the reserved size in qgroup
5462  *
5463  * This function is used to reserve space for snapshot/subvolume
5464  * creation and deletion. Those operations differ from the common
5465  * file/directory operations: they change two fs/file trees and the
5466  * root tree, and the number of items the qgroup reserves differs
5467  * from the free space reservation. So we can not use the space
5468  * reservation mechanism in start_transaction().
5469  */
5470 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5471                                      struct btrfs_block_rsv *rsv,
5472                                      int items,
5473                                      u64 *qgroup_reserved,
5474                                      bool use_global_rsv)
5475 {
5476         u64 num_bytes;
5477         int ret;
5478         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5479
5480         if (root->fs_info->quota_enabled) {
5481                 /* One for parent inode, two for dir entries */
5482                 num_bytes = 3 * root->nodesize;
5483                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5484                 if (ret)
5485                         return ret;
5486         } else {
5487                 num_bytes = 0;
5488         }
5489
5490         *qgroup_reserved = num_bytes;
5491
5492         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5493         rsv->space_info = __find_space_info(root->fs_info,
5494                                             BTRFS_BLOCK_GROUP_METADATA);
5495         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5496                                   BTRFS_RESERVE_FLUSH_ALL);
5497
5498         if (ret == -ENOSPC && use_global_rsv)
5499                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5500
5501         if (ret && *qgroup_reserved)
5502                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5503
5504         return ret;
5505 }
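
/*
 * Sketch of a snapshot/subvolume caller of the helper above (hedged:
 * loosely modeled on create_subvol(), details elided):
 *
 *	struct btrfs_block_rsv rsv;
 *	u64 qgroup_reserved;
 *
 *	btrfs_init_block_rsv(&rsv, BTRFS_BLOCK_RSV_TEMP);
 *	ret = btrfs_subvolume_reserve_metadata(root, &rsv, 8,
 *					       &qgroup_reserved, false);
 *	if (ret)
 *		return ret;
 *	...create the subvolume under this reservation...
 *	btrfs_subvolume_release_metadata(root, &rsv, qgroup_reserved);
 */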
5506
5507 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5508                                       struct btrfs_block_rsv *rsv,
5509                                       u64 qgroup_reserved)
5510 {
5511         btrfs_block_rsv_release(root, rsv, (u64)-1);
5512 }
5513
5514 /**
5515  * drop_outstanding_extent - drop an outstanding extent
5516  * @inode: the inode we're dropping the extent for
5517  * @num_bytes: the number of bytes we're releasing.
5518  *
5519  * This is called when we are freeing up an outstanding extent, either called
5520  * after an error or after an extent is written.  This will return the number of
5521  * reserved extents that need to be freed.  This must be called with
5522  * BTRFS_I(inode)->lock held.
5523  */
5524 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5525 {
5526         unsigned drop_inode_space = 0;
5527         unsigned dropped_extents = 0;
5528         unsigned num_extents = 0;
5529
5530         num_extents = (unsigned)div64_u64(num_bytes +
5531                                           BTRFS_MAX_EXTENT_SIZE - 1,
5532                                           BTRFS_MAX_EXTENT_SIZE);
5533         ASSERT(num_extents);
5534         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5535         BTRFS_I(inode)->outstanding_extents -= num_extents;
5536
5537         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5538             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5539                                &BTRFS_I(inode)->runtime_flags))
5540                 drop_inode_space = 1;
5541
5542         /*
5543          * If we have at least as many outstanding extents as we have
5544          * reserved then we need to leave the reserved extents count alone.
5545          */
5546         if (BTRFS_I(inode)->outstanding_extents >=
5547             BTRFS_I(inode)->reserved_extents)
5548                 return drop_inode_space;
5549
5550         dropped_extents = BTRFS_I(inode)->reserved_extents -
5551                 BTRFS_I(inode)->outstanding_extents;
5552         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5553         return dropped_extents + drop_inode_space;
5554 }
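
/*
 * Worked example for the ceil-division above (sizes hypothetical, though
 * BTRFS_MAX_EXTENT_SIZE is 128MiB at the time of writing): releasing
 * num_bytes = 130MiB yields num_extents = DIV_ROUND_UP(130MiB, 128MiB) = 2,
 * so two outstanding extents are dropped; how many reserved extents come
 * back depends on how many extents remain outstanding afterwards.
 */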
5555
5556 /**
5557  * calc_csum_metadata_size - return the amount of metadata space that must be
5558  *      reserved/freed for the given bytes.
5559  * @inode: the inode we're manipulating
5560  * @num_bytes: the number of bytes in question
5561  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5562  *
5563  * This adjusts the number of csum_bytes in the inode and then returns the
5564  * correct amount of metadata that must either be reserved or freed.  We
5565  * calculate how many checksums we can fit into one leaf and then divide the
5566  * number of bytes that will need to be checksummed by this value to figure out
5567  * how many checksums will be required.  If we are adding bytes then the number
5568  * may go up and we will return the number of additional bytes that must be
5569  * reserved.  If it is going down we will return the number of bytes that must
5570  * be freed.
5571  *
5572  * This must be called with BTRFS_I(inode)->lock held.
5573  */
5574 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5575                                    int reserve)
5576 {
5577         struct btrfs_root *root = BTRFS_I(inode)->root;
5578         u64 old_csums, num_csums;
5579
5580         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5581             BTRFS_I(inode)->csum_bytes == 0)
5582                 return 0;
5583
5584         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5585         if (reserve)
5586                 BTRFS_I(inode)->csum_bytes += num_bytes;
5587         else
5588                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5589         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5590
5591         /* No change, no need to reserve more */
5592         if (old_csums == num_csums)
5593                 return 0;
5594
5595         if (reserve)
5596                 return btrfs_calc_trans_metadata_size(root,
5597                                                       num_csums - old_csums);
5598
5599         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5600 }
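
/*
 * Worked example (hypothetical numbers, assuming one leaf can hold the
 * checksums for 4MiB of data): growing csum_bytes from 1MiB to 5MiB takes
 * old_csums = 1 leaf to num_csums = 2 leaves, so with reserve == 1 we
 * return btrfs_calc_trans_metadata_size(root, 1) of extra space to
 * reserve; the same shrink would return the same amount to free.
 */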
5601
5602 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5603 {
5604         struct btrfs_root *root = BTRFS_I(inode)->root;
5605         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5606         u64 to_reserve = 0;
5607         u64 csum_bytes;
5608         unsigned nr_extents = 0;
5609         int extra_reserve = 0;
5610         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5611         int ret = 0;
5612         bool delalloc_lock = true;
5613         u64 to_free = 0;
5614         unsigned dropped;
5615
5616         /* If we are a free space inode we need to not flush since we will be in
5617          * the middle of a transaction commit.  We also don't need the delalloc
5618          * mutex since we won't race with anybody.  We need this mostly to make
5619          * lockdep shut its filthy mouth.
5620          */
5621         if (btrfs_is_free_space_inode(inode)) {
5622                 flush = BTRFS_RESERVE_NO_FLUSH;
5623                 delalloc_lock = false;
5624         }
5625
5626         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5627             btrfs_transaction_in_commit(root->fs_info))
5628                 schedule_timeout(1);
5629
5630         if (delalloc_lock)
5631                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5632
5633         num_bytes = ALIGN(num_bytes, root->sectorsize);
5634
5635         spin_lock(&BTRFS_I(inode)->lock);
5636         nr_extents = (unsigned)div64_u64(num_bytes +
5637                                          BTRFS_MAX_EXTENT_SIZE - 1,
5638                                          BTRFS_MAX_EXTENT_SIZE);
5639         BTRFS_I(inode)->outstanding_extents += nr_extents;
5640         nr_extents = 0;
5641
5642         if (BTRFS_I(inode)->outstanding_extents >
5643             BTRFS_I(inode)->reserved_extents)
5644                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5645                         BTRFS_I(inode)->reserved_extents;
5646
5647         /*
5648          * Add an item to reserve for updating the inode when we complete the
5649          * delalloc io.
5650          */
5651         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5652                       &BTRFS_I(inode)->runtime_flags)) {
5653                 nr_extents++;
5654                 extra_reserve = 1;
5655         }
5656
5657         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5658         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5659         csum_bytes = BTRFS_I(inode)->csum_bytes;
5660         spin_unlock(&BTRFS_I(inode)->lock);
5661
5662         if (root->fs_info->quota_enabled) {
5663                 ret = btrfs_qgroup_reserve_meta(root,
5664                                 nr_extents * root->nodesize);
5665                 if (ret)
5666                         goto out_fail;
5667         }
5668
5669         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5670         if (unlikely(ret)) {
5671                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5672                 goto out_fail;
5673         }
5674
5675         spin_lock(&BTRFS_I(inode)->lock);
5676         if (extra_reserve) {
5677                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5678                         &BTRFS_I(inode)->runtime_flags);
5679                 nr_extents--;
5680         }
5681         BTRFS_I(inode)->reserved_extents += nr_extents;
5682         spin_unlock(&BTRFS_I(inode)->lock);
5683
5684         if (delalloc_lock)
5685                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5686
5687         if (to_reserve)
5688                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5689                                               btrfs_ino(inode), to_reserve, 1);
5690         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5691
5692         return 0;
5693
5694 out_fail:
5695         spin_lock(&BTRFS_I(inode)->lock);
5696         dropped = drop_outstanding_extent(inode, num_bytes);
5697         /*
5698          * If the inode's csum_bytes is the same as the original
5699          * csum_bytes then we know we haven't raced with any free()ers,
5700          * so we can just reduce the inode's csum bytes and carry on.
5701          */
5702         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5703                 calc_csum_metadata_size(inode, num_bytes, 0);
5704         } else {
5705                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5706                 u64 bytes;
5707
5708                 /*
5709                  * This is tricky, but first we need to figure out how much we
5710                  * freed from any free()ers that occurred during this
5711                  * reservation, so we reset ->csum_bytes to the csum_bytes
5712                  * before we dropped our lock, and then call the free for the
5713                  * number of bytes that were freed while we were trying our
5714                  * reservation.
5715                  */
5716                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5717                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5718                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5719
5720
5721                 /*
5722                  * Now we need to see how much we would have freed had we not
5723                  * been making this reservation and our ->csum_bytes were not
5724                  * artificially inflated.
5725                  */
5726                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5727                 bytes = csum_bytes - orig_csum_bytes;
5728                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5729
5730                 /*
5731                  * Now reset ->csum_bytes to what it should be.  If bytes is
5732                  * more than to_free then we would have freed more space had we
5733                  * not had an artificially high ->csum_bytes, so we need to free
5734                  * the remainder.  If bytes is the same or less then we don't
5735                  * need to do anything; the other free()ers did the correct
5736                  * thing.
5737                  */
5738                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5739                 if (bytes > to_free)
5740                         to_free = bytes - to_free;
5741                 else
5742                         to_free = 0;
5743         }
5744         spin_unlock(&BTRFS_I(inode)->lock);
5745         if (dropped)
5746                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5747
5748         if (to_free) {
5749                 btrfs_block_rsv_release(root, block_rsv, to_free);
5750                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5751                                               btrfs_ino(inode), to_free, 0);
5752         }
5753         if (delalloc_lock)
5754                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5755         return ret;
5756 }
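
/*
 * Numeric sketch of the out_fail csum fixup above (all values
 * hypothetical): we tried to add num_bytes = 2MiB, inflating ->csum_bytes
 * from 8MiB to 10MiB, and racing freers then brought it down to 7MiB.
 * bytes = 10MiB - 7MiB = 3MiB is what the freers released against the
 * inflated count (giving to_free); the second pass computes what freeing
 * those 3MiB from the uninflated 8MiB base would have released, and only
 * the excess of that over to_free, if any, is released here before
 * ->csum_bytes settles at 7MiB - 2MiB = 5MiB.
 */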
5757
5758 /**
5759  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5760  * @inode: the inode to release the reservation for
5761  * @num_bytes: the number of bytes we're releasing
5762  *
5763  * This will release the metadata reservation for an inode.  This can be called
5764  * once we complete IO for a given set of bytes to release their metadata
5765  * reservations.
5766  */
5767 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5768 {
5769         struct btrfs_root *root = BTRFS_I(inode)->root;
5770         u64 to_free = 0;
5771         unsigned dropped;
5772
5773         num_bytes = ALIGN(num_bytes, root->sectorsize);
5774         spin_lock(&BTRFS_I(inode)->lock);
5775         dropped = drop_outstanding_extent(inode, num_bytes);
5776
5777         if (num_bytes)
5778                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5779         spin_unlock(&BTRFS_I(inode)->lock);
5780         if (dropped > 0)
5781                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5782
5783         if (btrfs_test_is_dummy_root(root))
5784                 return;
5785
5786         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5787                                       btrfs_ino(inode), to_free, 0);
5788
5789         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5790                                 to_free);
5791 }
5792
5793 /**
5794  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5795  * delalloc
5796  * @inode: inode we're writing to
5797  * @start: start range we are writing to
5798  * @len: the length of the range we are writing to
5799  *
5800  * TODO: This function will eventually replace the old btrfs_delalloc_reserve_space()
5801  *
5802  * This will do the following things
5803  *
5804  * o reserve space in data space info for num bytes
5805  *   and reserve precious corresponding qgroup space
5806  *   (Done in check_data_free_space)
5807  *
5808  * o reserve space for metadata space, based on the number of outstanding
5809  *   extents and how much csums will be needed
5810  *   also reserve metadata space in a per root over-reserve method.
5811  * o add to the inode's ->delalloc_bytes
5812  * o add it to the fs_info's delalloc inodes list.
5813  *   (Above 3 all done in delalloc_reserve_metadata)
5814  *
5815  * Return 0 for success
5816  * Return <0 for error (-ENOSPC or -EDQUOT)
5817  */
5818 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5819 {
5820         int ret;
5821
5822         ret = btrfs_check_data_free_space(inode, start, len);
5823         if (ret < 0)
5824                 return ret;
5825         ret = btrfs_delalloc_reserve_metadata(inode, len);
5826         if (ret < 0)
5827                 btrfs_free_reserved_data_space(inode, start, len);
5828         return ret;
5829 }
5830
5831 /**
5832  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5833  * @inode: inode we're releasing space for
5834  * @start: start position of the space already reserved
5835  * @len: the len of the space already reserved
5836  *
5837  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5838  * called in the case that we don't need the metadata AND data reservations
5839  * anymore, e.g. if there is an error or we insert an inline extent.
5840  *
5841  * This function will release the metadata space that was not used and will
5842  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5843  * list if there are no delalloc bytes left.
5844  * It will also handle the qgroup reserved space.
5845  */
5846 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5847 {
5848         btrfs_delalloc_release_metadata(inode, len);
5849         btrfs_free_reserved_data_space(inode, start, len);
5850 }
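
/*
 * A hedged sketch of how the reserve/release pair above is typically used
 * on the buffered write path (not a verbatim call site):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, write_bytes);
 *	if (ret)
 *		return ret;
 *	ret = dirty_the_pages();	/* hypothetical helper */
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, pos, write_bytes);
 */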
5851
5852 static int update_block_group(struct btrfs_trans_handle *trans,
5853                               struct btrfs_root *root, u64 bytenr,
5854                               u64 num_bytes, int alloc)
5855 {
5856         struct btrfs_block_group_cache *cache = NULL;
5857         struct btrfs_fs_info *info = root->fs_info;
5858         u64 total = num_bytes;
5859         u64 old_val;
5860         u64 byte_in_group;
5861         int factor;
5862
5863         /* block accounting for super block */
5864         spin_lock(&info->delalloc_root_lock);
5865         old_val = btrfs_super_bytes_used(info->super_copy);
5866         if (alloc)
5867                 old_val += num_bytes;
5868         else
5869                 old_val -= num_bytes;
5870         btrfs_set_super_bytes_used(info->super_copy, old_val);
5871         spin_unlock(&info->delalloc_root_lock);
5872
5873         while (total) {
5874                 cache = btrfs_lookup_block_group(info, bytenr);
5875                 if (!cache)
5876                         return -ENOENT;
5877                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5878                                     BTRFS_BLOCK_GROUP_RAID1 |
5879                                     BTRFS_BLOCK_GROUP_RAID10))
5880                         factor = 2;
5881                 else
5882                         factor = 1;
5883                 /*
5884                  * If this block group has free space cache written out, we
5885                  * need to make sure to load it if we are removing space.  This
5886                  * is because we need the unpinning stage to actually add the
5887                  * space back to the block group, otherwise we will leak space.
5888                  */
5889                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5890                         cache_block_group(cache, 1);
5891
5892                 byte_in_group = bytenr - cache->key.objectid;
5893                 WARN_ON(byte_in_group > cache->key.offset);
5894
5895                 spin_lock(&cache->space_info->lock);
5896                 spin_lock(&cache->lock);
5897
5898                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5899                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5900                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5901
5902                 old_val = btrfs_block_group_used(&cache->item);
5903                 num_bytes = min(total, cache->key.offset - byte_in_group);
5904                 if (alloc) {
5905                         old_val += num_bytes;
5906                         btrfs_set_block_group_used(&cache->item, old_val);
5907                         cache->reserved -= num_bytes;
5908                         cache->space_info->bytes_reserved -= num_bytes;
5909                         cache->space_info->bytes_used += num_bytes;
5910                         cache->space_info->disk_used += num_bytes * factor;
5911                         spin_unlock(&cache->lock);
5912                         spin_unlock(&cache->space_info->lock);
5913                 } else {
5914                         old_val -= num_bytes;
5915                         btrfs_set_block_group_used(&cache->item, old_val);
5916                         cache->pinned += num_bytes;
5917                         cache->space_info->bytes_pinned += num_bytes;
5918                         cache->space_info->bytes_used -= num_bytes;
5919                         cache->space_info->disk_used -= num_bytes * factor;
5920                         spin_unlock(&cache->lock);
5921                         spin_unlock(&cache->space_info->lock);
5922
5923                         set_extent_dirty(info->pinned_extents,
5924                                          bytenr, bytenr + num_bytes - 1,
5925                                          GFP_NOFS | __GFP_NOFAIL);
5926                         /*
5927                          * No longer have used bytes in this block group, queue
5928                          * it for deletion.
5929                          */
5930                         if (old_val == 0) {
5931                                 spin_lock(&info->unused_bgs_lock);
5932                                 if (list_empty(&cache->bg_list)) {
5933                                         btrfs_get_block_group(cache);
5934                                         list_add_tail(&cache->bg_list,
5935                                                       &info->unused_bgs);
5936                                 }
5937                                 spin_unlock(&info->unused_bgs_lock);
5938                         }
5939                 }
5940
5941                 spin_lock(&trans->transaction->dirty_bgs_lock);
5942                 if (list_empty(&cache->dirty_list)) {
5943                         list_add_tail(&cache->dirty_list,
5944                                       &trans->transaction->dirty_bgs);
5945                         trans->transaction->num_dirty_bgs++;
5946                         btrfs_get_block_group(cache);
5947                 }
5948                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5949
5950                 btrfs_put_block_group(cache);
5951                 total -= num_bytes;
5952                 bytenr += num_bytes;
5953         }
5954         return 0;
5955 }
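
/*
 * Note on the factor used above (an observation, not new behavior): for
 * DUP, RAID1 and RAID10 block groups each logical byte occupies two bytes
 * on disk, so allocating 1MiB in such a group moves bytes_used by 1MiB but
 * disk_used by 2MiB.
 */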
5956
5957 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5958 {
5959         struct btrfs_block_group_cache *cache;
5960         u64 bytenr;
5961
5962         spin_lock(&root->fs_info->block_group_cache_lock);
5963         bytenr = root->fs_info->first_logical_byte;
5964         spin_unlock(&root->fs_info->block_group_cache_lock);
5965
5966         if (bytenr < (u64)-1)
5967                 return bytenr;
5968
5969         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5970         if (!cache)
5971                 return 0;
5972
5973         bytenr = cache->key.objectid;
5974         btrfs_put_block_group(cache);
5975
5976         return bytenr;
5977 }
5978
5979 static int pin_down_extent(struct btrfs_root *root,
5980                            struct btrfs_block_group_cache *cache,
5981                            u64 bytenr, u64 num_bytes, int reserved)
5982 {
5983         spin_lock(&cache->space_info->lock);
5984         spin_lock(&cache->lock);
5985         cache->pinned += num_bytes;
5986         cache->space_info->bytes_pinned += num_bytes;
5987         if (reserved) {
5988                 cache->reserved -= num_bytes;
5989                 cache->space_info->bytes_reserved -= num_bytes;
5990         }
5991         spin_unlock(&cache->lock);
5992         spin_unlock(&cache->space_info->lock);
5993
5994         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5995                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5996         if (reserved)
5997                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5998         return 0;
5999 }
6000
6001 /*
6002  * this function must be called within a transaction
6003  */
6004 int btrfs_pin_extent(struct btrfs_root *root,
6005                      u64 bytenr, u64 num_bytes, int reserved)
6006 {
6007         struct btrfs_block_group_cache *cache;
6008
6009         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6010         BUG_ON(!cache); /* Logic error */
6011
6012         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6013
6014         btrfs_put_block_group(cache);
6015         return 0;
6016 }
6017
6018 /*
6019  * this function must be called within a transaction
6020  */
6021 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6022                                     u64 bytenr, u64 num_bytes)
6023 {
6024         struct btrfs_block_group_cache *cache;
6025         int ret;
6026
6027         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6028         if (!cache)
6029                 return -EINVAL;
6030
6031         /*
6032          * pull in the free space cache (if any) so that our pin
6033          * removes the free space from the cache.  We have load_only set
6034          * to one because the slow code to read in the free extents does check
6035          * the pinned extents.
6036          */
6037         cache_block_group(cache, 1);
6038
6039         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6040
6041         /* remove us from the free space cache (if we're there at all) */
6042         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6043         btrfs_put_block_group(cache);
6044         return ret;
6045 }
6046
6047 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6048 {
6049         int ret;
6050         struct btrfs_block_group_cache *block_group;
6051         struct btrfs_caching_control *caching_ctl;
6052
6053         block_group = btrfs_lookup_block_group(root->fs_info, start);
6054         if (!block_group)
6055                 return -EINVAL;
6056
6057         cache_block_group(block_group, 0);
6058         caching_ctl = get_caching_control(block_group);
6059
6060         if (!caching_ctl) {
6061                 /* Logic error */
6062                 BUG_ON(!block_group_cache_done(block_group));
6063                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6064         } else {
6065                 mutex_lock(&caching_ctl->mutex);
6066
6067                 if (start >= caching_ctl->progress) {
6068                         ret = add_excluded_extent(root, start, num_bytes);
6069                 } else if (start + num_bytes <= caching_ctl->progress) {
6070                         ret = btrfs_remove_free_space(block_group,
6071                                                       start, num_bytes);
6072                 } else {
6073                         num_bytes = caching_ctl->progress - start;
6074                         ret = btrfs_remove_free_space(block_group,
6075                                                       start, num_bytes);
6076                         if (ret)
6077                                 goto out_lock;
6078
6079                         num_bytes = (start + num_bytes) -
6080                                 caching_ctl->progress;
6081                         start = caching_ctl->progress;
6082                         ret = add_excluded_extent(root, start, num_bytes);
6083                 }
6084 out_lock:
6085                 mutex_unlock(&caching_ctl->mutex);
6086                 put_caching_control(caching_ctl);
6087         }
6088         btrfs_put_block_group(block_group);
6089         return ret;
6090 }
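
/*
 * The three cases above, relative to the caching thread's progress pointer
 * (illustrative, with end = start + num_bytes):
 *
 *	progress <= start:         nothing cached yet, exclude the range
 *	end <= progress:           fully cached, remove it from free space
 *	start < progress < end:    split; remove the cached front half and
 *	                           exclude the tail the caching thread has
 *	                           not reached yet
 */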
6091
6092 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6093                                  struct extent_buffer *eb)
6094 {
6095         struct btrfs_file_extent_item *item;
6096         struct btrfs_key key;
6097         int found_type;
6098         int i;
6099
6100         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6101                 return 0;
6102
6103         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6104                 btrfs_item_key_to_cpu(eb, &key, i);
6105                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6106                         continue;
6107                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6108                 found_type = btrfs_file_extent_type(eb, item);
6109                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6110                         continue;
6111                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6112                         continue;
6113                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6114                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6115                 __exclude_logged_extent(log, key.objectid, key.offset);
6116         }
6117
6118         return 0;
6119 }
6120
6121 /**
6122  * btrfs_update_reserved_bytes - update the block_group and space info counters
6123  * @cache:      The cache we are manipulating
6124  * @num_bytes:  The number of bytes in question
6125  * @reserve:    One of the reservation enums
6126  * @delalloc:   The blocks are allocated for the delalloc write
6127  *
6128  * This is called by the allocator when it reserves space, or by somebody who is
6129  * freeing space that was never actually used on disk.  For example if you
6130  * reserve some space for a new leaf in transaction A and before transaction A
6131  * commits you free that leaf, you call this with reserve set to 0 in order to
6132  * clear the reservation.
6133  *
6134  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6135  * ENOSPC accounting.  For data we handle the reservation through clearing the
6136  * delalloc bits in the io_tree.  We have to do this since we could end up
6137  * allocating less disk space for the amount of data we have reserved in the
6138  * case of compression.
6139  *
6140  * If this is a reservation and the block group has become read only we cannot
6141  * make the reservation and return -EAGAIN, otherwise this function always
6142  * succeeds.
6143  */
6144 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6145                                        u64 num_bytes, int reserve, int delalloc)
6146 {
6147         struct btrfs_space_info *space_info = cache->space_info;
6148         int ret = 0;
6149
6150         spin_lock(&space_info->lock);
6151         spin_lock(&cache->lock);
6152         if (reserve != RESERVE_FREE) {
6153                 if (cache->ro) {
6154                         ret = -EAGAIN;
6155                 } else {
6156                         cache->reserved += num_bytes;
6157                         space_info->bytes_reserved += num_bytes;
6158                         if (reserve == RESERVE_ALLOC) {
6159                                 trace_btrfs_space_reservation(cache->fs_info,
6160                                                 "space_info", space_info->flags,
6161                                                 num_bytes, 0);
6162                                 space_info->bytes_may_use -= num_bytes;
6163                         }
6164
6165                         if (delalloc)
6166                                 cache->delalloc_bytes += num_bytes;
6167                 }
6168         } else {
6169                 if (cache->ro)
6170                         space_info->bytes_readonly += num_bytes;
6171                 cache->reserved -= num_bytes;
6172                 space_info->bytes_reserved -= num_bytes;
6173
6174                 if (delalloc)
6175                         cache->delalloc_bytes -= num_bytes;
6176         }
6177         spin_unlock(&cache->lock);
6178         spin_unlock(&space_info->lock);
6179         return ret;
6180 }
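
/*
 * Illustrative call (hypothetical values): reserving one 16KiB tree block
 * would be
 *
 *	ret = btrfs_update_reserved_bytes(cache, 16384, RESERVE_ALLOC, 0);
 *
 * which moves 16KiB from bytes_may_use to bytes_reserved, or fails with
 * -EAGAIN if the block group has gone read only in the meantime.
 */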
6181
6182 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6183                                 struct btrfs_root *root)
6184 {
6185         struct btrfs_fs_info *fs_info = root->fs_info;
6186         struct btrfs_caching_control *next;
6187         struct btrfs_caching_control *caching_ctl;
6188         struct btrfs_block_group_cache *cache;
6189
6190         down_write(&fs_info->commit_root_sem);
6191
6192         list_for_each_entry_safe(caching_ctl, next,
6193                                  &fs_info->caching_block_groups, list) {
6194                 cache = caching_ctl->block_group;
6195                 if (block_group_cache_done(cache)) {
6196                         cache->last_byte_to_unpin = (u64)-1;
6197                         list_del_init(&caching_ctl->list);
6198                         put_caching_control(caching_ctl);
6199                 } else {
6200                         cache->last_byte_to_unpin = caching_ctl->progress;
6201                 }
6202         }
6203
6204         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6205                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6206         else
6207                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6208
6209         up_write(&fs_info->commit_root_sem);
6210
6211         update_global_block_rsv(fs_info);
6212 }
6213
6214 /*
6215  * Returns the free cluster for the given space info and sets empty_cluster to
6216  * what it should be based on the mount options.
6217  */
6218 static struct btrfs_free_cluster *
6219 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6220                    u64 *empty_cluster)
6221 {
6222         struct btrfs_free_cluster *ret = NULL;
6223         bool ssd = btrfs_test_opt(root, SSD);
6224
6225         *empty_cluster = 0;
6226         if (btrfs_mixed_space_info(space_info))
6227                 return ret;
6228
6229         if (ssd)
6230                 *empty_cluster = 2 * 1024 * 1024;
6231         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6232                 ret = &root->fs_info->meta_alloc_cluster;
6233                 if (!ssd)
6234                         *empty_cluster = 64 * 1024;
6235         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6236                 ret = &root->fs_info->data_alloc_cluster;
6237         }
6238
6239         return ret;
6240 }
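
/*
 * Summary of the empty_cluster values chosen above (before the caller in
 * unpin_extent_range() doubles them):
 *
 *	metadata, ssd:      2MiB   meta_alloc_cluster
 *	metadata, no ssd:   64KiB  meta_alloc_cluster
 *	data, ssd:          2MiB   data_alloc_cluster
 *	data, no ssd:       0      no cluster
 *	mixed space info:   0      no cluster
 */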
6241
6242 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6243                               const bool return_free_space)
6244 {
6245         struct btrfs_fs_info *fs_info = root->fs_info;
6246         struct btrfs_block_group_cache *cache = NULL;
6247         struct btrfs_space_info *space_info;
6248         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6249         struct btrfs_free_cluster *cluster = NULL;
6250         u64 len;
6251         u64 total_unpinned = 0;
6252         u64 empty_cluster = 0;
6253         bool readonly;
6254
6255         while (start <= end) {
6256                 readonly = false;
6257                 if (!cache ||
6258                     start >= cache->key.objectid + cache->key.offset) {
6259                         if (cache)
6260                                 btrfs_put_block_group(cache);
6261                         total_unpinned = 0;
6262                         cache = btrfs_lookup_block_group(fs_info, start);
6263                         BUG_ON(!cache); /* Logic error */
6264
6265                         cluster = fetch_cluster_info(root,
6266                                                      cache->space_info,
6267                                                      &empty_cluster);
6268                         empty_cluster <<= 1;
6269                 }
6270
6271                 len = cache->key.objectid + cache->key.offset - start;
6272                 len = min(len, end + 1 - start);
6273
6274                 if (start < cache->last_byte_to_unpin) {
6275                         len = min(len, cache->last_byte_to_unpin - start);
6276                         if (return_free_space)
6277                                 btrfs_add_free_space(cache, start, len);
6278                 }
6279
6280                 start += len;
6281                 total_unpinned += len;
6282                 space_info = cache->space_info;
6283
6284                 /*
6285                  * If this space cluster has been marked as fragmented and we've
6286                  * unpinned enough in this block group to potentially allow a
6287                  * cluster to be created inside of it, go ahead and clear the
6288                  * fragmented check.
6289                  */
6290                 if (cluster && cluster->fragmented &&
6291                     total_unpinned > empty_cluster) {
6292                         spin_lock(&cluster->lock);
6293                         cluster->fragmented = 0;
6294                         spin_unlock(&cluster->lock);
6295                 }
6296
6297                 spin_lock(&space_info->lock);
6298                 spin_lock(&cache->lock);
6299                 cache->pinned -= len;
6300                 space_info->bytes_pinned -= len;
6301                 space_info->max_extent_size = 0;
6302                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6303                 if (cache->ro) {
6304                         space_info->bytes_readonly += len;
6305                         readonly = true;
6306                 }
6307                 spin_unlock(&cache->lock);
6308                 if (!readonly && global_rsv->space_info == space_info) {
6309                         spin_lock(&global_rsv->lock);
6310                         if (!global_rsv->full) {
6311                                 len = min(len, global_rsv->size -
6312                                           global_rsv->reserved);
6313                                 global_rsv->reserved += len;
6314                                 space_info->bytes_may_use += len;
6315                                 if (global_rsv->reserved >= global_rsv->size)
6316                                         global_rsv->full = 1;
6317                         }
6318                         spin_unlock(&global_rsv->lock);
6319                 }
6320                 spin_unlock(&space_info->lock);
6321         }
6322
6323         if (cache)
6324                 btrfs_put_block_group(cache);
6325         return 0;
6326 }
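
/*
 * Note on the loop above: while the global block reserve is not full,
 * unpinned space is credited to it first (and accounted in bytes_may_use);
 * only the remainder becomes generally available.  Read-only block groups
 * instead credit bytes_readonly.
 */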
6327
6328 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6329                                struct btrfs_root *root)
6330 {
6331         struct btrfs_fs_info *fs_info = root->fs_info;
6332         struct btrfs_block_group_cache *block_group, *tmp;
6333         struct list_head *deleted_bgs;
6334         struct extent_io_tree *unpin;
6335         u64 start;
6336         u64 end;
6337         int ret;
6338
6339         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6340                 unpin = &fs_info->freed_extents[1];
6341         else
6342                 unpin = &fs_info->freed_extents[0];
6343
6344         while (!trans->aborted) {
6345                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6346                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6347                                             EXTENT_DIRTY, NULL);
6348                 if (ret) {
6349                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6350                         break;
6351                 }
6352
6353                 if (btrfs_test_opt(root, DISCARD))
6354                         ret = btrfs_discard_extent(root, start,
6355                                                    end + 1 - start, NULL);
6356
6357                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6358                 unpin_extent_range(root, start, end, true);
6359                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6360                 cond_resched();
6361         }
6362
6363         /*
6364          * Transaction is finished.  We don't need the lock anymore.  We
6365          * do need to clean up the block groups in case of a transaction
6366          * abort.
6367          */
6368         deleted_bgs = &trans->transaction->deleted_bgs;
6369         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6370                 u64 trimmed = 0;
6371
6372                 ret = -EROFS;
6373                 if (!trans->aborted)
6374                         ret = btrfs_discard_extent(root,
6375                                                    block_group->key.objectid,
6376                                                    block_group->key.offset,
6377                                                    &trimmed);
6378
6379                 list_del_init(&block_group->bg_list);
6380                 btrfs_put_block_group_trimming(block_group);
6381                 btrfs_put_block_group(block_group);
6382
6383                 if (ret) {
6384                         const char *errstr = btrfs_decode_error(ret);
6385                         btrfs_warn(fs_info,
6386                                    "discard failed while removing block group: errno=%d %s",
6387                                    ret, errstr);
6388                 }
6389         }
6390
6391         return 0;
6392 }
6393
6394 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6395                              u64 owner, u64 root_objectid)
6396 {
6397         struct btrfs_space_info *space_info;
6398         u64 flags;
6399
6400         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6401                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6402                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6403                 else
6404                         flags = BTRFS_BLOCK_GROUP_METADATA;
6405         } else {
6406                 flags = BTRFS_BLOCK_GROUP_DATA;
6407         }
6408
6409         space_info = __find_space_info(fs_info, flags);
6410         BUG_ON(!space_info); /* Logic bug */
6411         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6412 }
6413
6414
6415 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6416                                 struct btrfs_root *root,
6417                                 struct btrfs_delayed_ref_node *node, u64 parent,
6418                                 u64 root_objectid, u64 owner_objectid,
6419                                 u64 owner_offset, int refs_to_drop,
6420                                 struct btrfs_delayed_extent_op *extent_op)
6421 {
6422         struct btrfs_key key;
6423         struct btrfs_path *path;
6424         struct btrfs_fs_info *info = root->fs_info;
6425         struct btrfs_root *extent_root = info->extent_root;
6426         struct extent_buffer *leaf;
6427         struct btrfs_extent_item *ei;
6428         struct btrfs_extent_inline_ref *iref;
6429         int ret;
6430         int is_data;
6431         int extent_slot = 0;
6432         int found_extent = 0;
6433         int num_to_del = 1;
6434         int no_quota = node->no_quota;
6435         u32 item_size;
6436         u64 refs;
6437         u64 bytenr = node->bytenr;
6438         u64 num_bytes = node->num_bytes;
6439         int last_ref = 0;
6440         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6441                                                  SKINNY_METADATA);
6442
6443         if (!info->quota_enabled || !is_fstree(root_objectid))
6444                 no_quota = 1;
6445
6446         path = btrfs_alloc_path();
6447         if (!path)
6448                 return -ENOMEM;
6449
6450         path->reada = 1;
6451         path->leave_spinning = 1;
6452
6453         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6454         BUG_ON(!is_data && refs_to_drop != 1);
6455
6456         if (is_data)
6457                 skinny_metadata = 0;
6458
6459         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6460                                     bytenr, num_bytes, parent,
6461                                     root_objectid, owner_objectid,
6462                                     owner_offset);
6463         if (ret == 0) {
6464                 extent_slot = path->slots[0];
6465                 while (extent_slot >= 0) {
6466                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6467                                               extent_slot);
6468                         if (key.objectid != bytenr)
6469                                 break;
6470                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6471                             key.offset == num_bytes) {
6472                                 found_extent = 1;
6473                                 break;
6474                         }
6475                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6476                             key.offset == owner_objectid) {
6477                                 found_extent = 1;
6478                                 break;
6479                         }
6480                         if (path->slots[0] - extent_slot > 5)
6481                                 break;
6482                         extent_slot--;
6483                 }
6484 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6485                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6486                 if (found_extent && item_size < sizeof(*ei))
6487                         found_extent = 0;
6488 #endif
6489                 if (!found_extent) {
6490                         BUG_ON(iref);
6491                         ret = remove_extent_backref(trans, extent_root, path,
6492                                                     NULL, refs_to_drop,
6493                                                     is_data, &last_ref);
6494                         if (ret) {
6495                                 btrfs_abort_transaction(trans, extent_root, ret);
6496                                 goto out;
6497                         }
6498                         btrfs_release_path(path);
6499                         path->leave_spinning = 1;
6500
6501                         key.objectid = bytenr;
6502                         key.type = BTRFS_EXTENT_ITEM_KEY;
6503                         key.offset = num_bytes;
6504
6505                         if (!is_data && skinny_metadata) {
6506                                 key.type = BTRFS_METADATA_ITEM_KEY;
6507                                 key.offset = owner_objectid;
6508                         }
6509
6510                         ret = btrfs_search_slot(trans, extent_root,
6511                                                 &key, path, -1, 1);
6512                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6513                                 /*
6514                                  * Couldn't find our skinny metadata item,
6515                                  * see if we have ye olde extent item.
6516                                  */
6517                                 path->slots[0]--;
6518                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6519                                                       path->slots[0]);
6520                                 if (key.objectid == bytenr &&
6521                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6522                                     key.offset == num_bytes)
6523                                         ret = 0;
6524                         }
6525
6526                         if (ret > 0 && skinny_metadata) {
6527                                 skinny_metadata = false;
6528                                 key.objectid = bytenr;
6529                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6530                                 key.offset = num_bytes;
6531                                 btrfs_release_path(path);
6532                                 ret = btrfs_search_slot(trans, extent_root,
6533                                                         &key, path, -1, 1);
6534                         }
6535
6536                         if (ret) {
6537                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6538                                         ret, bytenr);
6539                                 if (ret > 0)
6540                                         btrfs_print_leaf(extent_root,
6541                                                          path->nodes[0]);
6542                         }
6543                         if (ret < 0) {
6544                                 btrfs_abort_transaction(trans, extent_root, ret);
6545                                 goto out;
6546                         }
6547                         extent_slot = path->slots[0];
6548                 }
6549         } else if (WARN_ON(ret == -ENOENT)) {
6550                 btrfs_print_leaf(extent_root, path->nodes[0]);
6551                 btrfs_err(info,
6552                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6553                         bytenr, parent, root_objectid, owner_objectid,
6554                         owner_offset);
6555                 btrfs_abort_transaction(trans, extent_root, ret);
6556                 goto out;
6557         } else {
6558                 btrfs_abort_transaction(trans, extent_root, ret);
6559                 goto out;
6560         }
6561
6562         leaf = path->nodes[0];
6563         item_size = btrfs_item_size_nr(leaf, extent_slot);
6564 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6565         if (item_size < sizeof(*ei)) {
6566                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6567                 ret = convert_extent_item_v0(trans, extent_root, path,
6568                                              owner_objectid, 0);
6569                 if (ret < 0) {
6570                         btrfs_abort_transaction(trans, extent_root, ret);
6571                         goto out;
6572                 }
6573
6574                 btrfs_release_path(path);
6575                 path->leave_spinning = 1;
6576
6577                 key.objectid = bytenr;
6578                 key.type = BTRFS_EXTENT_ITEM_KEY;
6579                 key.offset = num_bytes;
6580
6581                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6582                                         -1, 1);
6583                 if (ret) {
6584                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6585                                 ret, bytenr);
6586                         btrfs_print_leaf(extent_root, path->nodes[0]);
6587                 }
6588                 if (ret < 0) {
6589                         btrfs_abort_transaction(trans, extent_root, ret);
6590                         goto out;
6591                 }
6592
6593                 extent_slot = path->slots[0];
6594                 leaf = path->nodes[0];
6595                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6596         }
6597 #endif
6598         BUG_ON(item_size < sizeof(*ei));
6599         ei = btrfs_item_ptr(leaf, extent_slot,
6600                             struct btrfs_extent_item);
6601         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6602             key.type == BTRFS_EXTENT_ITEM_KEY) {
6603                 struct btrfs_tree_block_info *bi;
6604                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6605                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6606                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6607         }
6608
6609         refs = btrfs_extent_refs(leaf, ei);
6610         if (refs < refs_to_drop) {
6611                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6612                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6613                 ret = -EINVAL;
6614                 btrfs_abort_transaction(trans, extent_root, ret);
6615                 goto out;
6616         }
6617         refs -= refs_to_drop;
6618
6619         if (refs > 0) {
6620                 if (extent_op)
6621                         __run_delayed_extent_op(extent_op, leaf, ei);
6622                 /*
6623                  * In the case of inline back ref, reference count will
6624                  * be updated by remove_extent_backref
6625                  */
6626                 if (iref) {
6627                         BUG_ON(!found_extent);
6628                 } else {
6629                         btrfs_set_extent_refs(leaf, ei, refs);
6630                         btrfs_mark_buffer_dirty(leaf);
6631                 }
6632                 if (found_extent) {
6633                         ret = remove_extent_backref(trans, extent_root, path,
6634                                                     iref, refs_to_drop,
6635                                                     is_data, &last_ref);
6636                         if (ret) {
6637                                 btrfs_abort_transaction(trans, extent_root, ret);
6638                                 goto out;
6639                         }
6640                 }
6641                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6642                                  root_objectid);
6643         } else {
6644                 if (found_extent) {
6645                         BUG_ON(is_data && refs_to_drop !=
6646                                extent_data_ref_count(path, iref));
6647                         if (iref) {
6648                                 BUG_ON(path->slots[0] != extent_slot);
6649                         } else {
6650                                 BUG_ON(path->slots[0] != extent_slot + 1);
6651                                 path->slots[0] = extent_slot;
6652                                 num_to_del = 2;
6653                         }
6654                 }
6655
6656                 last_ref = 1;
6657                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6658                                       num_to_del);
6659                 if (ret) {
6660                         btrfs_abort_transaction(trans, extent_root, ret);
6661                         goto out;
6662                 }
6663                 btrfs_release_path(path);
6664
6665                 if (is_data) {
6666                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6667                         if (ret) {
6668                                 btrfs_abort_transaction(trans, extent_root, ret);
6669                                 goto out;
6670                         }
6671                 }
6672
6673                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6674                 if (ret) {
6675                         btrfs_abort_transaction(trans, extent_root, ret);
6676                         goto out;
6677                 }
6678         }
6679         btrfs_release_path(path);
6680
6681 out:
6682         btrfs_free_path(path);
6683         return ret;
6684 }
6685
6686 /*
6687  * when we free a block, it is possible (and likely) that we free the last
6688  * delayed ref for that extent as well.  This searches the delayed ref tree for
6689  * a given extent, and if there are no other delayed refs to be processed, it
6690  * removes it from the tree.
6691  */
6692 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6693                                       struct btrfs_root *root, u64 bytenr)
6694 {
6695         struct btrfs_delayed_ref_head *head;
6696         struct btrfs_delayed_ref_root *delayed_refs;
6697         int ret = 0;
6698
6699         delayed_refs = &trans->transaction->delayed_refs;
6700         spin_lock(&delayed_refs->lock);
6701         head = btrfs_find_delayed_ref_head(trans, bytenr);
6702         if (!head)
6703                 goto out_delayed_unlock;
6704
6705         spin_lock(&head->lock);
6706         if (!list_empty(&head->ref_list))
6707                 goto out;
6708
6709         if (head->extent_op) {
6710                 if (!head->must_insert_reserved)
6711                         goto out;
6712                 btrfs_free_delayed_extent_op(head->extent_op);
6713                 head->extent_op = NULL;
6714         }
6715
6716         /*
6717          * waiting for the lock here would deadlock.  If someone else has it
6718          * locked, they are already in the process of dropping it anyway.
6719          */
6720         if (!mutex_trylock(&head->mutex))
6721                 goto out;
6722
6723         /*
6724          * at this point we have a head with no other entries.  Go
6725          * ahead and process it.
6726          */
6727         head->node.in_tree = 0;
6728         rb_erase(&head->href_node, &delayed_refs->href_root);
6729
6730         atomic_dec(&delayed_refs->num_entries);
6731
6732         /*
6733          * we don't take a ref on the node because we're removing it from the
6734          * tree, so we just steal the ref the tree was holding.
6735          */
6736         delayed_refs->num_heads--;
6737         if (head->processing == 0)
6738                 delayed_refs->num_heads_ready--;
6739         head->processing = 0;
6740         spin_unlock(&head->lock);
6741         spin_unlock(&delayed_refs->lock);
6742
6743         BUG_ON(head->extent_op);
6744         if (head->must_insert_reserved)
6745                 ret = 1;
6746
6747         mutex_unlock(&head->mutex);
6748         btrfs_put_delayed_ref(&head->node);
6749         return ret;
6750 out:
6751         spin_unlock(&head->lock);
6752
6753 out_delayed_unlock:
6754         spin_unlock(&delayed_refs->lock);
6755         return 0;
6756 }
6757
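/*
 * Editor's note: a sketch of the lock ordering check_ref_cleanup() relies
 * on, restated from the code above.  The head is only torn down with all
 * three levels held, and the mutex is only trylock'ed because blocking on
 * it here could deadlock against a holder that is already dropping it:
 *
 *   spin_lock(&delayed_refs->lock);        (1) whole-tree lock
 *     spin_lock(&head->lock);              (2) per-head lock
 *       mutex_trylock(&head->mutex);       (3) never block on this one
 *
 * Every failure path unwinds and returns 0, leaving the head for the
 * normal delayed-ref machinery to process later.
 */
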
6758 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6759                            struct btrfs_root *root,
6760                            struct extent_buffer *buf,
6761                            u64 parent, int last_ref)
6762 {
6763         int pin = 1;
6764         int ret;
6765
6766         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6767                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6768                                         buf->start, buf->len,
6769                                         parent, root->root_key.objectid,
6770                                         btrfs_header_level(buf),
6771                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6772                 BUG_ON(ret); /* -ENOMEM */
6773         }
6774
6775         if (!last_ref)
6776                 return;
6777
6778         if (btrfs_header_generation(buf) == trans->transid) {
6779                 struct btrfs_block_group_cache *cache;
6780
6781                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6782                         ret = check_ref_cleanup(trans, root, buf->start);
6783                         if (!ret)
6784                                 goto out;
6785                 }
6786
6787                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6788
6789                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6790                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6791                         btrfs_put_block_group(cache);
6792                         goto out;
6793                 }
6794
6795                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6796
6797                 btrfs_add_free_space(cache, buf->start, buf->len);
6798                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6799                 btrfs_put_block_group(cache);
6800                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6801                 pin = 0;
6802         }
6803 out:
6804         if (pin)
6805                 add_pinned_bytes(root->fs_info, buf->len,
6806                                  btrfs_header_level(buf),
6807                                  root->root_key.objectid);
6808
6809         /*
6810          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6811          * anymore.
6812          */
6813         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6814 }
6815
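/*
 * Editor's note: a summary of the pinning decision above, restated from
 * the code.  A freed tree block stays pinned (unavailable until the
 * transaction commits) unless it was allocated in this very transaction
 * and never written to disk:
 *
 *   header generation != transid          -> pinned
 *   check_ref_cleanup() returned 0        -> pinned (refs may remain)
 *   WRITTEN header flag set               -> pin_down_extent()
 *   otherwise                             -> btrfs_add_free_space(),
 *                                            reusable immediately
 */
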
6816 /* Can return -ENOMEM */
6817 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6818                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6819                       u64 owner, u64 offset, int no_quota)
6820 {
6821         int ret;
6822         struct btrfs_fs_info *fs_info = root->fs_info;
6823
6824         if (btrfs_test_is_dummy_root(root))
6825                 return 0;
6826
6827         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6828
6829         /*
6830          * tree log blocks never actually go into the extent allocation
6831          * tree, just update pinning info and exit early.
6832          */
6833         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6834                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6835                 /* unlocks the pinned mutex */
6836                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6837                 ret = 0;
6838         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6839                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6840                                         num_bytes,
6841                                         parent, root_objectid, (int)owner,
6842                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6843         } else {
6844                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6845                                                 num_bytes,
6846                                                 parent, root_objectid, owner,
6847                                                 offset, BTRFS_DROP_DELAYED_REF,
6848                                                 NULL, no_quota);
6849         }
6850         return ret;
6851 }
6852
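/*
 * Editor's note: btrfs_free_extent() never touches the extent tree
 * directly; restating the dispatch above:
 *
 *   root_objectid == BTRFS_TREE_LOG_OBJECTID -> pin now, no delayed ref
 *   owner < BTRFS_FIRST_FREE_OBJECTID        -> delayed tree (metadata) ref
 *   otherwise                                -> delayed data ref
 *
 * The extent item itself is only updated later, when the queued delayed
 * ref is run and ends up in __btrfs_free_extent().
 */
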
6853 /*
6854  * when we wait for progress in the block group caching, it's because
6855  * our allocation attempt failed at least once.  So, we must sleep
6856  * and let some progress happen before we try again.
6857  *
6858  * This function will sleep at least once waiting for new free space to
6859  * show up, and then it will check the block group free space numbers
6860  * for our min num_bytes.  Another option is to have it go ahead
6861  * and look in the rbtree for a free extent of a given size, but this
6862  * is a good start.
6863  *
6864  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6865  * any of the information in this block group.
6866  */
6867 static noinline void
6868 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6869                                 u64 num_bytes)
6870 {
6871         struct btrfs_caching_control *caching_ctl;
6872
6873         caching_ctl = get_caching_control(cache);
6874         if (!caching_ctl)
6875                 return;
6876
6877         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6878                    (cache->free_space_ctl->free_space >= num_bytes));
6879
6880         put_caching_control(caching_ctl);
6881 }
6882
6883 static noinline int
6884 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6885 {
6886         struct btrfs_caching_control *caching_ctl;
6887         int ret = 0;
6888
6889         caching_ctl = get_caching_control(cache);
6890         if (!caching_ctl)
6891                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6892
6893         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6894         if (cache->cached == BTRFS_CACHE_ERROR)
6895                 ret = -EIO;
6896         put_caching_control(caching_ctl);
6897         return ret;
6898 }
6899
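/*
 * Editor's note: the difference between the two waiters above is worth
 * spelling out: _progress returns as soon as the caching thread has
 * found at least num_bytes of free space (or has finished), while _done
 * always waits for the full caching pass and is the only one that turns
 * BTRFS_CACHE_ERROR into -EIO for the caller.
 */
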
6900 int __get_raid_index(u64 flags)
6901 {
6902         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6903                 return BTRFS_RAID_RAID10;
6904         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6905                 return BTRFS_RAID_RAID1;
6906         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6907                 return BTRFS_RAID_DUP;
6908         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6909                 return BTRFS_RAID_RAID0;
6910         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6911                 return BTRFS_RAID_RAID5;
6912         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6913                 return BTRFS_RAID_RAID6;
6914
6915         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6916 }
6917
6918 int get_block_group_index(struct btrfs_block_group_cache *cache)
6919 {
6920         return __get_raid_index(cache->flags);
6921 }
6922
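/*
 * Editor's note: a hypothetical worked example of the mapping above,
 * assuming a data block group created with -d raid10:
 *
 *   u64 flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID10;
 *   __get_raid_index(flags) == BTRFS_RAID_RAID10;
 *
 * A profile with none of the RAID bits set falls through to
 * BTRFS_RAID_SINGLE, which is why "single" needs no flag of its own.
 */
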
6923 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6924         [BTRFS_RAID_RAID10]     = "raid10",
6925         [BTRFS_RAID_RAID1]      = "raid1",
6926         [BTRFS_RAID_DUP]        = "dup",
6927         [BTRFS_RAID_RAID0]      = "raid0",
6928         [BTRFS_RAID_SINGLE]     = "single",
6929         [BTRFS_RAID_RAID5]      = "raid5",
6930         [BTRFS_RAID_RAID6]      = "raid6",
6931 };
6932
6933 static const char *get_raid_name(enum btrfs_raid_types type)
6934 {
6935         if (type >= BTRFS_NR_RAID_TYPES)
6936                 return NULL;
6937
6938         return btrfs_raid_type_names[type];
6939 }
6940
6941 enum btrfs_loop_type {
6942         LOOP_CACHING_NOWAIT = 0,
6943         LOOP_CACHING_WAIT = 1,
6944         LOOP_ALLOC_CHUNK = 2,
6945         LOOP_NO_EMPTY_SIZE = 3,
6946 };
6947
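/*
 * Editor's note: find_free_extent() below walks these states in order,
 * each pass relaxing a constraint: first skip block groups that are
 * still caching, then wait on them, then force a chunk allocation, and
 * finally retry with empty_size and empty_cluster forced to zero before
 * giving up with -ENOSPC.
 */
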
6948 static inline void
6949 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6950                        int delalloc)
6951 {
6952         if (delalloc)
6953                 down_read(&cache->data_rwsem);
6954 }
6955
6956 static inline void
6957 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6958                        int delalloc)
6959 {
6960         btrfs_get_block_group(cache);
6961         if (delalloc)
6962                 down_read(&cache->data_rwsem);
6963 }
6964
6965 static struct btrfs_block_group_cache *
6966 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6967                    struct btrfs_free_cluster *cluster,
6968                    int delalloc)
6969 {
6970         struct btrfs_block_group_cache *used_bg;
6971         bool locked = false;
6972 again:
6973         spin_lock(&cluster->refill_lock);
6974         if (locked) {
6975                 if (used_bg == cluster->block_group)
6976                         return used_bg;
6977
6978                 up_read(&used_bg->data_rwsem);
6979                 btrfs_put_block_group(used_bg);
6980         }
6981
6982         used_bg = cluster->block_group;
6983         if (!used_bg)
6984                 return NULL;
6985
6986         if (used_bg == block_group)
6987                 return used_bg;
6988
6989         btrfs_get_block_group(used_bg);
6990
6991         if (!delalloc)
6992                 return used_bg;
6993
6994         if (down_read_trylock(&used_bg->data_rwsem))
6995                 return used_bg;
6996
6997         spin_unlock(&cluster->refill_lock);
6998         down_read(&used_bg->data_rwsem);
6999         locked = true;
7000         goto again;
7001 }
7002
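/*
 * Editor's note: the retry loop above is a lock-ordering dance.  We must
 * not block on used_bg->data_rwsem while holding cluster->refill_lock,
 * so when the trylock fails we drop the spinlock, take the rwsem, then
 * re-take the spinlock and re-check that cluster->block_group still
 * points at the group we locked; if the cluster was retargeted in that
 * window, the reference and rwsem are dropped and the sequence restarts.
 */
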
7003 static inline void
7004 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7005                          int delalloc)
7006 {
7007         if (delalloc)
7008                 up_read(&cache->data_rwsem);
7009         btrfs_put_block_group(cache);
7010 }
7011
7012 /*
7013  * walks the btree of allocated extents and finds a hole of a given size.
7014  * The key ins is changed to record the hole:
7015  * ins->objectid == start position
7016  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7017  * ins->offset == the size of the hole.
7018  * Any available blocks before search_start are skipped.
7019  *
7020  * If there is no suitable free space, we record the max size of the
7021  * largest free space extent we encountered.
7022  */
7023 static noinline int find_free_extent(struct btrfs_root *orig_root,
7024                                      u64 num_bytes, u64 empty_size,
7025                                      u64 hint_byte, struct btrfs_key *ins,
7026                                      u64 flags, int delalloc)
7027 {
7028         int ret = 0;
7029         struct btrfs_root *root = orig_root->fs_info->extent_root;
7030         struct btrfs_free_cluster *last_ptr = NULL;
7031         struct btrfs_block_group_cache *block_group = NULL;
7032         u64 search_start = 0;
7033         u64 max_extent_size = 0;
7034         u64 empty_cluster = 0;
7035         struct btrfs_space_info *space_info;
7036         int loop = 0;
7037         int index = __get_raid_index(flags);
7038         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7039                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7040         bool failed_cluster_refill = false;
7041         bool failed_alloc = false;
7042         bool use_cluster = true;
7043         bool have_caching_bg = false;
7044         bool full_search = false;
7045
7046         WARN_ON(num_bytes < root->sectorsize);
7047         ins->type = BTRFS_EXTENT_ITEM_KEY;
7048         ins->objectid = 0;
7049         ins->offset = 0;
7050
7051         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7052
7053         space_info = __find_space_info(root->fs_info, flags);
7054         if (!space_info) {
7055                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7056                 return -ENOSPC;
7057         }
7058
7059         /*
7060          * If our free space is heavily fragmented we may not be able to make
7061          * big contiguous allocations, so instead of doing the expensive search
7062          * for free space, simply return ENOSPC with our max_extent_size so we
7063          * can go ahead and search for a more manageable chunk.
7064          *
7065          * If our max_extent_size is large enough for our allocation simply
7066          * disable clustering since we will likely not be able to find enough
7067          * space to create a cluster and induce latency trying.
7068          */
7069         if (unlikely(space_info->max_extent_size)) {
7070                 spin_lock(&space_info->lock);
7071                 if (space_info->max_extent_size &&
7072                     num_bytes > space_info->max_extent_size) {
7073                         ins->offset = space_info->max_extent_size;
7074                         spin_unlock(&space_info->lock);
7075                         return -ENOSPC;
7076                 } else if (space_info->max_extent_size) {
7077                         use_cluster = false;
7078                 }
7079                 spin_unlock(&space_info->lock);
7080         }
7081
7082         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7083         if (last_ptr) {
7084                 spin_lock(&last_ptr->lock);
7085                 if (last_ptr->block_group)
7086                         hint_byte = last_ptr->window_start;
7087                 if (last_ptr->fragmented) {
7088                         /*
7089                          * We still set window_start so we can keep track of the
7090                          * last place we found an allocation to try and save
7091                          * some time.
7092                          */
7093                         hint_byte = last_ptr->window_start;
7094                         use_cluster = false;
7095                 }
7096                 spin_unlock(&last_ptr->lock);
7097         }
7098
7099         search_start = max(search_start, first_logical_byte(root, 0));
7100         search_start = max(search_start, hint_byte);
7101         if (search_start == hint_byte) {
7102                 block_group = btrfs_lookup_block_group(root->fs_info,
7103                                                        search_start);
7104                 /*
7105                  * we don't want to use the block group if it doesn't match our
7106                  * allocation bits, or if it's not cached.
7107                  *
7108                  * However if we are re-searching with an ideal block group
7109                  * picked out then we don't care that the block group is cached.
7110                  */
7111                 if (block_group && block_group_bits(block_group, flags) &&
7112                     block_group->cached != BTRFS_CACHE_NO) {
7113                         down_read(&space_info->groups_sem);
7114                         if (list_empty(&block_group->list) ||
7115                             block_group->ro) {
7116                                 /*
7117                                  * someone is removing this block group,
7118                                  * we can't jump into the have_block_group
7119                                  * target because our list pointers are not
7120                                  * valid
7121                                  */
7122                                 btrfs_put_block_group(block_group);
7123                                 up_read(&space_info->groups_sem);
7124                         } else {
7125                                 index = get_block_group_index(block_group);
7126                                 btrfs_lock_block_group(block_group, delalloc);
7127                                 goto have_block_group;
7128                         }
7129                 } else if (block_group) {
7130                         btrfs_put_block_group(block_group);
7131                 }
7132         }
7133 search:
7134         have_caching_bg = false;
7135         if (index == 0 || index == __get_raid_index(flags))
7136                 full_search = true;
7137         down_read(&space_info->groups_sem);
7138         list_for_each_entry(block_group, &space_info->block_groups[index],
7139                             list) {
7140                 u64 offset;
7141                 int cached;
7142
7143                 btrfs_grab_block_group(block_group, delalloc);
7144                 search_start = block_group->key.objectid;
7145
7146                 /*
7147                  * this can happen if we end up cycling through all the
7148                  * raid types, but we want to make sure we only allocate
7149                  * for the proper type.
7150                  */
7151                 if (!block_group_bits(block_group, flags)) {
7152                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7153                                     BTRFS_BLOCK_GROUP_RAID1 |
7154                                     BTRFS_BLOCK_GROUP_RAID5 |
7155                                     BTRFS_BLOCK_GROUP_RAID6 |
7156                                     BTRFS_BLOCK_GROUP_RAID10;
7157
7158                         /*
7159                          * if they asked for extra copies and this block group
7160                          * doesn't provide them, bail.  This does allow us to
7161                          * fill raid0 from raid1.
7162                          */
7163                         if ((flags & extra) && !(block_group->flags & extra))
7164                                 goto loop;
7165                 }
7166
7167 have_block_group:
7168                 cached = block_group_cache_done(block_group);
7169                 if (unlikely(!cached)) {
7170                         have_caching_bg = true;
7171                         ret = cache_block_group(block_group, 0);
7172                         BUG_ON(ret < 0);
7173                         ret = 0;
7174                 }
7175
7176                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7177                         goto loop;
7178                 if (unlikely(block_group->ro))
7179                         goto loop;
7180
7181                 /*
7182                  * OK, we want to try to use the cluster allocator, so
7183                  * let's look there.
7184                  */
7185                 if (last_ptr && use_cluster) {
7186                         struct btrfs_block_group_cache *used_block_group;
7187                         unsigned long aligned_cluster;
7188                         /*
7189                          * the refill lock keeps out other
7190                          * people trying to start a new cluster
7191                          */
7192                         used_block_group = btrfs_lock_cluster(block_group,
7193                                                               last_ptr,
7194                                                               delalloc);
7195                         if (!used_block_group)
7196                                 goto refill_cluster;
7197
7198                         if (used_block_group != block_group &&
7199                             (used_block_group->ro ||
7200                              !block_group_bits(used_block_group, flags)))
7201                                 goto release_cluster;
7202
7203                         offset = btrfs_alloc_from_cluster(used_block_group,
7204                                                 last_ptr,
7205                                                 num_bytes,
7206                                                 used_block_group->key.objectid,
7207                                                 &max_extent_size);
7208                         if (offset) {
7209                                 /* we have a block, we're done */
7210                                 spin_unlock(&last_ptr->refill_lock);
7211                                 trace_btrfs_reserve_extent_cluster(root,
7212                                                 used_block_group,
7213                                                 search_start, num_bytes);
7214                                 if (used_block_group != block_group) {
7215                                         btrfs_release_block_group(block_group,
7216                                                                   delalloc);
7217                                         block_group = used_block_group;
7218                                 }
7219                                 goto checks;
7220                         }
7221
7222                         WARN_ON(last_ptr->block_group != used_block_group);
7223 release_cluster:
7224                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7225                          * set up a new cluster, so let's just skip it
7226                          * and let the allocator find whatever block
7227                          * it can find.  If we reach this point, we
7228                          * will have tried the cluster allocator
7229                          * plenty of times and not have found
7230                          * anything, so we are likely way too
7231                          * fragmented for the clustering stuff to find
7232                          * anything.
7233                          *
7234                          * However, if the cluster is taken from the
7235                          * current block group, release the cluster
7236                          * first, so that we stand a better chance of
7237                          * succeeding in the unclustered
7238                          * allocation.  */
7239                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7240                             used_block_group != block_group) {
7241                                 spin_unlock(&last_ptr->refill_lock);
7242                                 btrfs_release_block_group(used_block_group,
7243                                                           delalloc);
7244                                 goto unclustered_alloc;
7245                         }
7246
7247                         /*
7248                          * this cluster didn't work out, free it and
7249                          * start over
7250                          */
7251                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7252
7253                         if (used_block_group != block_group)
7254                                 btrfs_release_block_group(used_block_group,
7255                                                           delalloc);
7256 refill_cluster:
7257                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7258                                 spin_unlock(&last_ptr->refill_lock);
7259                                 goto unclustered_alloc;
7260                         }
7261
7262                         aligned_cluster = max_t(unsigned long,
7263                                                 empty_cluster + empty_size,
7264                                               block_group->full_stripe_len);
7265
7266                         /* allocate a cluster in this block group */
7267                         ret = btrfs_find_space_cluster(root, block_group,
7268                                                        last_ptr, search_start,
7269                                                        num_bytes,
7270                                                        aligned_cluster);
7271                         if (ret == 0) {
7272                                 /*
7273                                  * now pull our allocation out of this
7274                                  * cluster
7275                                  */
7276                                 offset = btrfs_alloc_from_cluster(block_group,
7277                                                         last_ptr,
7278                                                         num_bytes,
7279                                                         search_start,
7280                                                         &max_extent_size);
7281                                 if (offset) {
7282                                         /* we found one, proceed */
7283                                         spin_unlock(&last_ptr->refill_lock);
7284                                         trace_btrfs_reserve_extent_cluster(root,
7285                                                 block_group, search_start,
7286                                                 num_bytes);
7287                                         goto checks;
7288                                 }
7289                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7290                                    && !failed_cluster_refill) {
7291                                 spin_unlock(&last_ptr->refill_lock);
7292
7293                                 failed_cluster_refill = true;
7294                                 wait_block_group_cache_progress(block_group,
7295                                        num_bytes + empty_cluster + empty_size);
7296                                 goto have_block_group;
7297                         }
7298
7299                         /*
7300                          * at this point we either didn't find a cluster
7301                          * or we weren't able to allocate a block from our
7302                          * cluster.  Free the cluster we've been trying
7303                          * to use, and go to the next block group
7304                          */
7305                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7306                         spin_unlock(&last_ptr->refill_lock);
7307                         goto loop;
7308                 }
7309
7310 unclustered_alloc:
7311                 /*
7312                  * We are doing an unclustered alloc, set the fragmented flag so
7313                  * we don't bother trying to set up a cluster again until we get
7314                  * more space.
7315                  */
7316                 if (unlikely(last_ptr)) {
7317                         spin_lock(&last_ptr->lock);
7318                         last_ptr->fragmented = 1;
7319                         spin_unlock(&last_ptr->lock);
7320                 }
7321                 spin_lock(&block_group->free_space_ctl->tree_lock);
7322                 if (cached &&
7323                     block_group->free_space_ctl->free_space <
7324                     num_bytes + empty_cluster + empty_size) {
7325                         if (block_group->free_space_ctl->free_space >
7326                             max_extent_size)
7327                                 max_extent_size =
7328                                         block_group->free_space_ctl->free_space;
7329                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7330                         goto loop;
7331                 }
7332                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7333
7334                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7335                                                     num_bytes, empty_size,
7336                                                     &max_extent_size);
7337                 /*
7338                  * If we didn't find a chunk, and we haven't failed on this
7339                  * block group before, and this block group is in the middle of
7340                  * caching and we are ok with waiting, then go ahead and wait
7341                  * for progress to be made, and set failed_alloc to true.
7342                  *
7343                  * If failed_alloc is true then we've already waited on this
7344                  * block group once and should move on to the next block group.
7345                  */
7346                 if (!offset && !failed_alloc && !cached &&
7347                     loop > LOOP_CACHING_NOWAIT) {
7348                         wait_block_group_cache_progress(block_group,
7349                                                 num_bytes + empty_size);
7350                         failed_alloc = true;
7351                         goto have_block_group;
7352                 } else if (!offset) {
7353                         goto loop;
7354                 }
7355 checks:
7356                 search_start = ALIGN(offset, root->stripesize);
7357
7358                 /* move on to the next group */
7359                 if (search_start + num_bytes >
7360                     block_group->key.objectid + block_group->key.offset) {
7361                         btrfs_add_free_space(block_group, offset, num_bytes);
7362                         goto loop;
7363                 }
7364
7365                 if (offset < search_start)
7366                         btrfs_add_free_space(block_group, offset,
7367                                              search_start - offset);
7368                 BUG_ON(offset > search_start);
7369
7370                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7371                                                   alloc_type, delalloc);
7372                 if (ret == -EAGAIN) {
7373                         btrfs_add_free_space(block_group, offset, num_bytes);
7374                         goto loop;
7375                 }
7376
7377                 /* we are all good, let's return */
7378                 ins->objectid = search_start;
7379                 ins->offset = num_bytes;
7380
7381                 trace_btrfs_reserve_extent(orig_root, block_group,
7382                                            search_start, num_bytes);
7383                 btrfs_release_block_group(block_group, delalloc);
7384                 break;
7385 loop:
7386                 failed_cluster_refill = false;
7387                 failed_alloc = false;
7388                 BUG_ON(index != get_block_group_index(block_group));
7389                 btrfs_release_block_group(block_group, delalloc);
7390         }
7391         up_read(&space_info->groups_sem);
7392
7393         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7394                 goto search;
7395
7396         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7397                 goto search;
7398
7399         /*
7400          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7401          *                      caching kthreads as we move along
7402          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7403          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7404          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7405          *                      again
7406          */
7407         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7408                 index = 0;
7409                 if (loop == LOOP_CACHING_NOWAIT) {
7410                         /*
7411                          * We want to skip the LOOP_CACHING_WAIT step if we
7412                          * don't have any uncached bgs and we've already done a
7413                          * full search through.
7414                          */
7415                         if (have_caching_bg || !full_search)
7416                                 loop = LOOP_CACHING_WAIT;
7417                         else
7418                                 loop = LOOP_ALLOC_CHUNK;
7419                 } else {
7420                         loop++;
7421                 }
7422
7423                 if (loop == LOOP_ALLOC_CHUNK) {
7424                         struct btrfs_trans_handle *trans;
7425                         int exist = 0;
7426
7427                         trans = current->journal_info;
7428                         if (trans)
7429                                 exist = 1;
7430                         else
7431                                 trans = btrfs_join_transaction(root);
7432
7433                         if (IS_ERR(trans)) {
7434                                 ret = PTR_ERR(trans);
7435                                 goto out;
7436                         }
7437
7438                         ret = do_chunk_alloc(trans, root, flags,
7439                                              CHUNK_ALLOC_FORCE);
7440
7441                         /*
7442                          * If we can't allocate a new chunk we've already looped
7443                          * through at least once, move on to the NO_EMPTY_SIZE
7444                          * case.
7445                          */
7446                         if (ret == -ENOSPC)
7447                                 loop = LOOP_NO_EMPTY_SIZE;
7448
7449                         /*
7450                          * Do not bail out on ENOSPC, since we can
7451                          * still try other fallbacks below.
7452                          */
7453                         if (ret < 0 && ret != -ENOSPC)
7454                                 btrfs_abort_transaction(trans,
7455                                                         root, ret);
7456                         else
7457                                 ret = 0;
7458                         if (!exist)
7459                                 btrfs_end_transaction(trans, root);
7460                         if (ret)
7461                                 goto out;
7462                 }
7463
7464                 if (loop == LOOP_NO_EMPTY_SIZE) {
7465                         /*
7466                          * Don't loop again if we already have no empty_size and
7467                          * no empty_cluster.
7468                          */
7469                         if (empty_size == 0 &&
7470                             empty_cluster == 0) {
7471                                 ret = -ENOSPC;
7472                                 goto out;
7473                         }
7474                         empty_size = 0;
7475                         empty_cluster = 0;
7476                 }
7477
7478                 goto search;
7479         } else if (!ins->objectid) {
7480                 ret = -ENOSPC;
7481         } else if (ins->objectid) {
7482                 if (!use_cluster && last_ptr) {
7483                         spin_lock(&last_ptr->lock);
7484                         last_ptr->window_start = ins->objectid;
7485                         spin_unlock(&last_ptr->lock);
7486                 }
7487                 ret = 0;
7488         }
7489 out:
7490         if (ret == -ENOSPC) {
7491                 spin_lock(&space_info->lock);
7492                 space_info->max_extent_size = max_extent_size;
7493                 spin_unlock(&space_info->lock);
7494                 ins->offset = max_extent_size;
7495         }
7496         return ret;
7497 }
7498
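/*
 * Editor's note: a condensed sketch of the control flow above, since the
 * gotos make it hard to follow.  Names mirror the labels in the function:
 *
 *   search:
 *     for each block group of the current raid index:
 *       have_block_group:
 *         kick off caching if needed
 *         try the cluster allocator     (refill_cluster/release_cluster)
 *       unclustered_alloc:
 *         fall back to the group's free space cache
 *       checks:
 *         align, bounds-check, reserve  -> success, break out
 *       loop:
 *         release the group, try the next one
 *     then bump the raid index or escalate the loop state, goto search
 *
 * On final failure, max_extent_size is stashed in the space_info and in
 * ins->offset so the caller can size its next request accordingly.
 */
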
7499 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7500                             int dump_block_groups)
7501 {
7502         struct btrfs_block_group_cache *cache;
7503         int index = 0;
7504
7505         spin_lock(&info->lock);
7506         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7507                info->flags,
7508                info->total_bytes - info->bytes_used - info->bytes_pinned -
7509                info->bytes_reserved - info->bytes_readonly,
7510                (info->full) ? "" : "not ");
7511         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7512                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7513                info->total_bytes, info->bytes_used, info->bytes_pinned,
7514                info->bytes_reserved, info->bytes_may_use,
7515                info->bytes_readonly);
7516         spin_unlock(&info->lock);
7517
7518         if (!dump_block_groups)
7519                 return;
7520
7521         down_read(&info->groups_sem);
7522 again:
7523         list_for_each_entry(cache, &info->block_groups[index], list) {
7524                 spin_lock(&cache->lock);
7525                 printk(KERN_INFO "BTRFS: "
7526                            "block group %llu has %llu bytes, "
7527                            "%llu used %llu pinned %llu reserved %s\n",
7528                        cache->key.objectid, cache->key.offset,
7529                        btrfs_block_group_used(&cache->item), cache->pinned,
7530                        cache->reserved, cache->ro ? "[readonly]" : "");
7531                 btrfs_dump_free_space(cache, bytes);
7532                 spin_unlock(&cache->lock);
7533         }
7534         if (++index < BTRFS_NR_RAID_TYPES)
7535                 goto again;
7536         up_read(&info->groups_sem);
7537 }
7538
7539 int btrfs_reserve_extent(struct btrfs_root *root,
7540                          u64 num_bytes, u64 min_alloc_size,
7541                          u64 empty_size, u64 hint_byte,
7542                          struct btrfs_key *ins, int is_data, int delalloc)
7543 {
7544         bool final_tried = num_bytes == min_alloc_size;
7545         u64 flags;
7546         int ret;
7547
7548         flags = btrfs_get_alloc_profile(root, is_data);
7549 again:
7550         WARN_ON(num_bytes < root->sectorsize);
7551         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7552                                flags, delalloc);
7553
7554         if (ret == -ENOSPC) {
7555                 if (!final_tried && ins->offset) {
7556                         num_bytes = min(num_bytes >> 1, ins->offset);
7557                         num_bytes = round_down(num_bytes, root->sectorsize);
7558                         num_bytes = max(num_bytes, min_alloc_size);
7559                         if (num_bytes == min_alloc_size)
7560                                 final_tried = true;
7561                         goto again;
7562                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7563                         struct btrfs_space_info *sinfo;
7564
7565                         sinfo = __find_space_info(root->fs_info, flags);
7566                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7567                                 flags, num_bytes);
7568                         if (sinfo)
7569                                 dump_space_info(sinfo, num_bytes, 1);
7570                 }
7571         }
7572
7573         return ret;
7574 }
7575
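/*
 * Editor's note: a hypothetical helper (not part of btrfs) isolating the
 * retry sizing above, as a sketch of the arithmetic.  With a 4K
 * sectorsize and a min_alloc_size of 64K, a failed 1M request whose
 * ins->offset came back as 300K retries with 300K:
 * min(512K, 300K) -> round_down to 4K -> max with 64K.
 */
static inline u64 next_alloc_size(u64 num_bytes, u64 max_found,
                                  u64 min_alloc_size, u32 sectorsize)
{
        /* shrink toward what the allocator said was actually available */
        u64 n = min(num_bytes >> 1, max_found);

        n = round_down(n, sectorsize);
        /* but never retry below the caller's minimum */
        return max(n, min_alloc_size);
}
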
7576 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7577                                         u64 start, u64 len,
7578                                         int pin, int delalloc)
7579 {
7580         struct btrfs_block_group_cache *cache;
7581         int ret = 0;
7582
7583         cache = btrfs_lookup_block_group(root->fs_info, start);
7584         if (!cache) {
7585                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7586                         start);
7587                 return -ENOSPC;
7588         }
7589
7590         if (pin)
7591                 pin_down_extent(root, cache, start, len, 1);
7592         else {
7593                 if (btrfs_test_opt(root, DISCARD))
7594                         ret = btrfs_discard_extent(root, start, len, NULL);
7595                 btrfs_add_free_space(cache, start, len);
7596                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7597         }
7598
7599         btrfs_put_block_group(cache);
7600
7601         trace_btrfs_reserved_extent_free(root, start, len);
7602
7603         return ret;
7604 }
7605
7606 int btrfs_free_reserved_extent(struct btrfs_root *root,
7607                                u64 start, u64 len, int delalloc)
7608 {
7609         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7610 }
7611
7612 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7613                                        u64 start, u64 len)
7614 {
7615         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7616 }
7617
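/*
 * Editor's note: both wrappers above funnel into
 * __btrfs_free_reserved_extent(); the only difference is the pin flag.
 * The pinning variant (used in the error paths of
 * alloc_reserved_tree_block() below) keeps the space out of the
 * allocator until the transaction commits, while the plain variant
 * returns it to the free space cache immediately, issuing a discard
 * first when the DISCARD mount option is set.
 */
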
7618 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7619                                       struct btrfs_root *root,
7620                                       u64 parent, u64 root_objectid,
7621                                       u64 flags, u64 owner, u64 offset,
7622                                       struct btrfs_key *ins, int ref_mod)
7623 {
7624         int ret;
7625         struct btrfs_fs_info *fs_info = root->fs_info;
7626         struct btrfs_extent_item *extent_item;
7627         struct btrfs_extent_inline_ref *iref;
7628         struct btrfs_path *path;
7629         struct extent_buffer *leaf;
7630         int type;
7631         u32 size;
7632
7633         if (parent > 0)
7634                 type = BTRFS_SHARED_DATA_REF_KEY;
7635         else
7636                 type = BTRFS_EXTENT_DATA_REF_KEY;
7637
7638         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7639
7640         path = btrfs_alloc_path();
7641         if (!path)
7642                 return -ENOMEM;
7643
7644         path->leave_spinning = 1;
7645         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7646                                       ins, size);
7647         if (ret) {
7648                 btrfs_free_path(path);
7649                 return ret;
7650         }
7651
7652         leaf = path->nodes[0];
7653         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7654                                      struct btrfs_extent_item);
7655         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7656         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7657         btrfs_set_extent_flags(leaf, extent_item,
7658                                flags | BTRFS_EXTENT_FLAG_DATA);
7659
7660         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7661         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7662         if (parent > 0) {
7663                 struct btrfs_shared_data_ref *ref;
7664                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7665                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7666                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7667         } else {
7668                 struct btrfs_extent_data_ref *ref;
7669                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7670                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7671                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7672                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7673                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7674         }
7675
7676         btrfs_mark_buffer_dirty(path->nodes[0]);
7677         btrfs_free_path(path);
7678
7679         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7680         if (ret) { /* -ENOENT, logic error */
7681                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7682                         ins->objectid, ins->offset);
7683                 BUG();
7684         }
7685         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7686         return ret;
7687 }
7688
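/*
 * Editor's note: the on-disk layout built above, restated.  The extent
 * item and its first inline backref form a single leaf item of size
 * sizeof(extent_item) + btrfs_extent_inline_ref_size(type):
 *
 *   shared (parent > 0):
 *     [ btrfs_extent_item | SHARED_DATA_REF: parent bytenr | count ]
 *   non-shared:
 *     [ btrfs_extent_item | EXTENT_DATA_REF: root, objectid,
 *                           offset, count ]
 *
 * For the shared case the inline ref offset carries the parent bytenr;
 * for the non-shared case the root/objectid/offset triple names the
 * owner directly.
 */
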
7689 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7690                                      struct btrfs_root *root,
7691                                      u64 parent, u64 root_objectid,
7692                                      u64 flags, struct btrfs_disk_key *key,
7693                                      int level, struct btrfs_key *ins,
7694                                      int no_quota)
7695 {
7696         int ret;
7697         struct btrfs_fs_info *fs_info = root->fs_info;
7698         struct btrfs_extent_item *extent_item;
7699         struct btrfs_tree_block_info *block_info;
7700         struct btrfs_extent_inline_ref *iref;
7701         struct btrfs_path *path;
7702         struct extent_buffer *leaf;
7703         u32 size = sizeof(*extent_item) + sizeof(*iref);
7704         u64 num_bytes = ins->offset;
7705         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7706                                                  SKINNY_METADATA);
7707
7708         if (!skinny_metadata)
7709                 size += sizeof(*block_info);
7710
7711         path = btrfs_alloc_path();
7712         if (!path) {
7713                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7714                                                    root->nodesize);
7715                 return -ENOMEM;
7716         }
7717
7718         path->leave_spinning = 1;
7719         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7720                                       ins, size);
7721         if (ret) {
7722                 btrfs_free_path(path);
7723                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7724                                                    root->nodesize);
7725                 return ret;
7726         }
7727
7728         leaf = path->nodes[0];
7729         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7730                                      struct btrfs_extent_item);
7731         btrfs_set_extent_refs(leaf, extent_item, 1);
7732         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7733         btrfs_set_extent_flags(leaf, extent_item,
7734                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7735
7736         if (skinny_metadata) {
7737                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7738                 num_bytes = root->nodesize;
7739         } else {
7740                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7741                 btrfs_set_tree_block_key(leaf, block_info, key);
7742                 btrfs_set_tree_block_level(leaf, block_info, level);
7743                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7744         }
7745
7746         if (parent > 0) {
7747                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7748                 btrfs_set_extent_inline_ref_type(leaf, iref,
7749                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7750                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7751         } else {
7752                 btrfs_set_extent_inline_ref_type(leaf, iref,
7753                                                  BTRFS_TREE_BLOCK_REF_KEY);
7754                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7755         }
7756
7757         btrfs_mark_buffer_dirty(leaf);
7758         btrfs_free_path(path);
7759
7760         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7761                                  1);
7762         if (ret) { /* -ENOENT, logic error */
7763                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7764                         ins->objectid, ins->offset);
7765                 BUG();
7766         }
7767
7768         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7769         return ret;
7770 }
7771
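/*
 * Editor's note: the two tree block layouts produced above.  With the
 * SKINNY_METADATA incompat flag the level travels in the item key
 * instead, so tree_block_info is dropped and the item shrinks:
 *
 *   classic: [ extent_item | tree_block_info (key, level) | inline ref ]
 *   skinny:  [ extent_item | inline ref ]
 *
 * Either way the inline ref is SHARED_BLOCK_REF_KEY (offset = parent
 * bytenr) for full-backref blocks and TREE_BLOCK_REF_KEY (offset = root
 * objectid) otherwise.
 */
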
7772 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7773                                      struct btrfs_root *root,
7774                                      u64 root_objectid, u64 owner,
7775                                      u64 offset, struct btrfs_key *ins)
7776 {
7777         int ret;
7778
7779         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7780
7781         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7782                                          ins->offset, 0,
7783                                          root_objectid, owner, offset,
7784                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7785         return ret;
7786 }
7787
7788 /*
7789  * this is used by the tree logging recovery code.  It records that
7790  * an extent has been allocated and makes sure to clear the free
7791  * space cache bits as well
7792  */
7793 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7794                                    struct btrfs_root *root,
7795                                    u64 root_objectid, u64 owner, u64 offset,
7796                                    struct btrfs_key *ins)
7797 {
7798         int ret;
7799         struct btrfs_block_group_cache *block_group;
7800
7801         /*
7802          * Mixed block groups will exclude before processing the log so we only
7803          * need to do the exclude dance if this fs isn't mixed.
7804          */
7805         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7806                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7807                 if (ret)
7808                         return ret;
7809         }
7810
7811         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7812         if (!block_group)
7813                 return -EINVAL;
7814
7815         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7816                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7817         BUG_ON(ret); /* logic error */
7818         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7819                                          0, owner, offset, ins, 1);
7820         btrfs_put_block_group(block_group);
7821         return ret;
7822 }
7823
7824 static struct extent_buffer *
7825 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7826                       u64 bytenr, int level)
7827 {
7828         struct extent_buffer *buf;
7829
7830         buf = btrfs_find_create_tree_block(root, bytenr);
7831         if (!buf)
7832                 return ERR_PTR(-ENOMEM);
7833         btrfs_set_header_generation(buf, trans->transid);
7834         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7835         btrfs_tree_lock(buf);
7836         clean_tree_block(trans, root->fs_info, buf);
7837         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7838
7839         btrfs_set_lock_blocking(buf);
7840         btrfs_set_buffer_uptodate(buf);
7841
7842         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7843                 buf->log_index = root->log_transid % 2;
7844                 /*
7845                  * we allow two log transactions at a time, use different
7846                  * EXTENT bits to differentiate dirty pages.
7847                  */
7848                 if (buf->log_index == 0)
7849                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7850                                         buf->start + buf->len - 1, GFP_NOFS);
7851                 else
7852                         set_extent_new(&root->dirty_log_pages, buf->start,
7853                                         buf->start + buf->len - 1, GFP_NOFS);
7854         } else {
7855                 buf->log_index = -1;
7856                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7857                          buf->start + buf->len - 1, GFP_NOFS);
7858         }
7859         trans->blocks_used++;
7860         /* this returns a buffer locked for blocking */
7861         return buf;
7862 }
7863
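/*
 * Editor's note: on the dirty tracking above.  Because two log
 * transactions can be in flight at once, log tree buffers are tagged in
 * dirty_log_pages with alternating extent bits keyed off
 * log_transid % 2 (EXTENT_DIRTY for index 0, EXTENT_NEW for index 1),
 * so each log commit flushes only its own buffers.  Ordinary buffers
 * simply land in the transaction's dirty_pages set with log_index -1.
 */
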
7864 static struct btrfs_block_rsv *
7865 use_block_rsv(struct btrfs_trans_handle *trans,
7866               struct btrfs_root *root, u32 blocksize)
7867 {
7868         struct btrfs_block_rsv *block_rsv;
7869         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7870         int ret;
7871         bool global_updated = false;
7872
7873         block_rsv = get_block_rsv(trans, root);
7874
7875         if (unlikely(block_rsv->size == 0))
7876                 goto try_reserve;
7877 again:
7878         ret = block_rsv_use_bytes(block_rsv, blocksize);
7879         if (!ret)
7880                 return block_rsv;
7881
7882         if (block_rsv->failfast)
7883                 return ERR_PTR(ret);
7884
7885         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7886                 global_updated = true;
7887                 update_global_block_rsv(root->fs_info);
7888                 goto again;
7889         }
7890
7891         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7892                 static DEFINE_RATELIMIT_STATE(_rs,
7893                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7894                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7895                 if (__ratelimit(&_rs))
7896                         WARN(1, KERN_DEBUG
7897                                 "BTRFS: block rsv returned %d\n", ret);
7898         }
7899 try_reserve:
7900         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7901                                      BTRFS_RESERVE_NO_FLUSH);
7902         if (!ret)
7903                 return block_rsv;
7904         /*
7905          * If we couldn't reserve metadata bytes, try to use some from
7906          * the global reserve, provided our reservation shares the same
7907          * space_info as the global rsv.
7908          */
7909         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7910             block_rsv->space_info == global_rsv->space_info) {
7911                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7912                 if (!ret)
7913                         return global_rsv;
7914         }
7915         return ERR_PTR(ret);
7916 }
7917
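/*
 * Give a tree block reservation back: re-add @blocksize to @block_rsv
 * and release anything above the rsv's target size to the space_info.
 */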
7918 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7919                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7920 {
7921         block_rsv_add_bytes(block_rsv, blocksize, 0);
7922         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7923 }
7924
7925 /*
7926  * finds a free extent and does all the dirty work required for allocation;
7927  * returns the tree buffer or an ERR_PTR on error.
7928  */
7929 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7930                                         struct btrfs_root *root,
7931                                         u64 parent, u64 root_objectid,
7932                                         struct btrfs_disk_key *key, int level,
7933                                         u64 hint, u64 empty_size)
7934 {
7935         struct btrfs_key ins;
7936         struct btrfs_block_rsv *block_rsv;
7937         struct extent_buffer *buf;
7938         struct btrfs_delayed_extent_op *extent_op;
7939         u64 flags = 0;
7940         int ret;
7941         u32 blocksize = root->nodesize;
7942         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7943                                                  SKINNY_METADATA);
7944
7945         if (btrfs_test_is_dummy_root(root)) {
7946                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7947                                             level);
7948                 if (!IS_ERR(buf))
7949                         root->alloc_bytenr += blocksize;
7950                 return buf;
7951         }
7952
7953         block_rsv = use_block_rsv(trans, root, blocksize);
7954         if (IS_ERR(block_rsv))
7955                 return ERR_CAST(block_rsv);
7956
7957         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7958                                    empty_size, hint, &ins, 0, 0);
7959         if (ret)
7960                 goto out_unuse;
7961
7962         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7963         if (IS_ERR(buf)) {
7964                 ret = PTR_ERR(buf);
7965                 goto out_free_reserved;
7966         }
7967
7968         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7969                 if (parent == 0)
7970                         parent = ins.objectid;
7971                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7972         } else
7973                 BUG_ON(parent > 0);
7974
7975         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7976                 extent_op = btrfs_alloc_delayed_extent_op();
7977                 if (!extent_op) {
7978                         ret = -ENOMEM;
7979                         goto out_free_buf;
7980                 }
7981                 if (key)
7982                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7983                 else
7984                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7985                 extent_op->flags_to_set = flags;
7986                 if (skinny_metadata)
7987                         extent_op->update_key = 0;
7988                 else
7989                         extent_op->update_key = 1;
7990                 extent_op->update_flags = 1;
7991                 extent_op->is_data = 0;
7992                 extent_op->level = level;
7993
7994                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7995                                                  ins.objectid, ins.offset,
7996                                                  parent, root_objectid, level,
7997                                                  BTRFS_ADD_DELAYED_EXTENT,
7998                                                  extent_op, 0);
7999                 if (ret)
8000                         goto out_free_delayed;
8001         }
8002         return buf;
8003
8004 out_free_delayed:
8005         btrfs_free_delayed_extent_op(extent_op);
8006 out_free_buf:
8007         free_extent_buffer(buf);
8008 out_free_reserved:
8009         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8010 out_unuse:
8011         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8012         return ERR_PTR(ret);
8013 }
8014
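/*
 * State carried through a tree walk: refs[] and flags[] cache the
 * extent refcount and flags seen at each level of the path, stage is
 * DROP_REFERENCE or UPDATE_BACKREF, and update_progress holds the
 * drop-progress key used to decide which subtrees still need backref
 * updates.
 */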
8015 struct walk_control {
8016         u64 refs[BTRFS_MAX_LEVEL];
8017         u64 flags[BTRFS_MAX_LEVEL];
8018         struct btrfs_key update_progress;
8019         int stage;
8020         int level;
8021         int shared_level;
8022         int update_ref;
8023         int keep_locks;
8024         int reada_slot;
8025         int reada_count;
8026         int for_reloc;
8027 };
8028
8029 #define DROP_REFERENCE  1
8030 #define UPDATE_BACKREF  2
8031
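/*
 * Read ahead tree blocks referenced by the node at the current walk
 * level.  The window, wc->reada_count, shrinks to 2/3 when the walk has
 * not yet consumed the previous window and grows by 3/2 otherwise, and
 * blocks that the walk would skip anyway are not read.
 */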
8032 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8033                                      struct btrfs_root *root,
8034                                      struct walk_control *wc,
8035                                      struct btrfs_path *path)
8036 {
8037         u64 bytenr;
8038         u64 generation;
8039         u64 refs;
8040         u64 flags;
8041         u32 nritems;
8042         u32 blocksize;
8043         struct btrfs_key key;
8044         struct extent_buffer *eb;
8045         int ret;
8046         int slot;
8047         int nread = 0;
8048
8049         if (path->slots[wc->level] < wc->reada_slot) {
8050                 wc->reada_count = wc->reada_count * 2 / 3;
8051                 wc->reada_count = max(wc->reada_count, 2);
8052         } else {
8053                 wc->reada_count = wc->reada_count * 3 / 2;
8054                 wc->reada_count = min_t(int, wc->reada_count,
8055                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8056         }
8057
8058         eb = path->nodes[wc->level];
8059         nritems = btrfs_header_nritems(eb);
8060         blocksize = root->nodesize;
8061
8062         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8063                 if (nread >= wc->reada_count)
8064                         break;
8065
8066                 cond_resched();
8067                 bytenr = btrfs_node_blockptr(eb, slot);
8068                 generation = btrfs_node_ptr_generation(eb, slot);
8069
8070                 if (slot == path->slots[wc->level])
8071                         goto reada;
8072
8073                 if (wc->stage == UPDATE_BACKREF &&
8074                     generation <= root->root_key.offset)
8075                         continue;
8076
8077                 /* We don't lock the tree block, it's OK to be racy here */
8078                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8079                                                wc->level - 1, 1, &refs,
8080                                                &flags);
8081                 /* We don't care about errors in readahead. */
8082                 if (ret < 0)
8083                         continue;
8084                 BUG_ON(refs == 0);
8085
8086                 if (wc->stage == DROP_REFERENCE) {
8087                         if (refs == 1)
8088                                 goto reada;
8089
8090                         if (wc->level == 1 &&
8091                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8092                                 continue;
8093                         if (!wc->update_ref ||
8094                             generation <= root->root_key.offset)
8095                                 continue;
8096                         btrfs_node_key_to_cpu(eb, &key, slot);
8097                         ret = btrfs_comp_cpu_keys(&key,
8098                                                   &wc->update_progress);
8099                         if (ret < 0)
8100                                 continue;
8101                 } else {
8102                         if (wc->level == 1 &&
8103                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8104                                 continue;
8105                 }
8106 reada:
8107                 readahead_tree_block(root, bytenr);
8108                 nread++;
8109         }
8110         wc->reada_slot = slot;
8111 }
8112
8113 /*
8114  * TODO: Modify the related functions to add the affected node/leaf to
8115  * dirty_extent_root for later qgroup accounting.
8116  *
8117  * Currently, this function does nothing.
8118  */
8119 static int account_leaf_items(struct btrfs_trans_handle *trans,
8120                               struct btrfs_root *root,
8121                               struct extent_buffer *eb)
8122 {
8123         int nr = btrfs_header_nritems(eb);
8124         int i, extent_type;
8125         struct btrfs_key key;
8126         struct btrfs_file_extent_item *fi;
8127         u64 bytenr, num_bytes;
8128
8129         for (i = 0; i < nr; i++) {
8130                 btrfs_item_key_to_cpu(eb, &key, i);
8131
8132                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8133                         continue;
8134
8135                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8136                 /* filter out non-qgroup-accountable extents */
8137                 extent_type = btrfs_file_extent_type(eb, fi);
8138
8139                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8140                         continue;
8141
8142                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8143                 if (!bytenr)
8144                         continue;
8145
8146                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8147         }
8148         return 0;
8149 }
8150
8151 /*
8152  * Walk up the tree from the bottom, freeing leaves and any interior
8153  * nodes which have had all slots visited. If a node (leaf or
8154  * interior) is freed, the node above it will have its slot
8155  * incremented. The root node will never be freed.
8156  *
8157  * At the end of this function, we should have a path which has all
8158  * slots incremented to the next position for a search. If we need to
8159  * read a new node it will be NULL and the node above it will have the
8160  * correct slot selected for a later read.
8161  *
8162  * If we increment the root node's slot counter past the number of
8163  * elements, 1 is returned to signal completion of the search.
8164  */
8165 static int adjust_slots_upwards(struct btrfs_root *root,
8166                                 struct btrfs_path *path, int root_level)
8167 {
8168         int level = 0;
8169         int nr, slot;
8170         struct extent_buffer *eb;
8171
8172         if (root_level == 0)
8173                 return 1;
8174
8175         while (level <= root_level) {
8176                 eb = path->nodes[level];
8177                 nr = btrfs_header_nritems(eb);
8178                 path->slots[level]++;
8179                 slot = path->slots[level];
8180                 if (slot >= nr || level == 0) {
8181                         /*
8182                          * Don't free the root -  we will detect this
8183                          * condition after our loop and return a
8184                          * positive value for caller to stop walking the tree.
8185                          */
8186                         if (level != root_level) {
8187                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8188                                 path->locks[level] = 0;
8189
8190                                 free_extent_buffer(eb);
8191                                 path->nodes[level] = NULL;
8192                                 path->slots[level] = 0;
8193                         }
8194                 } else {
8195                         /*
8196                          * We have a valid slot to walk back down
8197                          * from. Stop here so caller can process these
8198                          * new nodes.
8199                          */
8200                         break;
8201                 }
8202
8203                 level++;
8204         }
8205
8206         eb = path->nodes[root_level];
8207         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8208                 return 1;
8209
8210         return 0;
8211 }
8212
8213 /*
8214  * root_eb is the subtree root and is locked before this function is called.
8215  * TODO: Modify this function to mark all blocks (including completely
8216  * shared nodes) in dirty_extent_root so they get accounted in qgroup.
8217  */
8218 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8219                                   struct btrfs_root *root,
8220                                   struct extent_buffer *root_eb,
8221                                   u64 root_gen,
8222                                   int root_level)
8223 {
8224         int ret = 0;
8225         int level;
8226         struct extent_buffer *eb = root_eb;
8227         struct btrfs_path *path = NULL;
8228
8229         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8230         BUG_ON(root_eb == NULL);
8231
8232         if (!root->fs_info->quota_enabled)
8233                 return 0;
8234
8235         if (!extent_buffer_uptodate(root_eb)) {
8236                 ret = btrfs_read_buffer(root_eb, root_gen);
8237                 if (ret)
8238                         goto out;
8239         }
8240
8241         if (root_level == 0) {
8242                 ret = account_leaf_items(trans, root, root_eb);
8243                 goto out;
8244         }
8245
8246         path = btrfs_alloc_path();
8247         if (!path)
8248                 return -ENOMEM;
8249
8250         /*
8251          * Walk down the tree.  Missing extent blocks are filled in as
8252          * we go. Metadata is accounted every time we read a new
8253          * extent block.
8254          *
8255          * When we reach a leaf, we account for file extent items in it,
8256          * walk back up the tree (adjusting slot pointers as we go)
8257          * and restart the search process.
8258          */
8259         extent_buffer_get(root_eb); /* For path */
8260         path->nodes[root_level] = root_eb;
8261         path->slots[root_level] = 0;
8262         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8263 walk_down:
8264         level = root_level;
8265         while (level >= 0) {
8266                 if (path->nodes[level] == NULL) {
8267                         int parent_slot;
8268                         u64 child_gen;
8269                         u64 child_bytenr;
8270
8271                         /* We need to get child blockptr/gen from
8272                          * parent before we can read it. */
8273                         eb = path->nodes[level + 1];
8274                         parent_slot = path->slots[level + 1];
8275                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8276                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8277
8278                         eb = read_tree_block(root, child_bytenr, child_gen);
8279                         if (IS_ERR(eb)) {
8280                                 ret = PTR_ERR(eb);
8281                                 goto out;
8282                         } else if (!extent_buffer_uptodate(eb)) {
8283                                 free_extent_buffer(eb);
8284                                 ret = -EIO;
8285                                 goto out;
8286                         }
8287
8288                         path->nodes[level] = eb;
8289                         path->slots[level] = 0;
8290
8291                         btrfs_tree_read_lock(eb);
8292                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8293                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8294                 }
8295
8296                 if (level == 0) {
8297                         ret = account_leaf_items(trans, root, path->nodes[level]);
8298                         if (ret)
8299                                 goto out;
8300
8301                         /* Nonzero return here means we completed our search */
8302                         ret = adjust_slots_upwards(root, path, root_level);
8303                         if (ret)
8304                                 break;
8305
8306                         /* Restart search with new slots */
8307                         goto walk_down;
8308                 }
8309
8310                 level--;
8311         }
8312
8313         ret = 0;
8314 out:
8315         btrfs_free_path(path);
8316
8317         return ret;
8318 }
8319
8320 /*
8321  * helper to process tree block while walking down the tree.
8322  *
8323  * when wc->stage == UPDATE_BACKREF, this function updates
8324  * back refs for pointers in the block.
8325  *
8326  * NOTE: return value 1 means we should stop walking down.
8327  */
8328 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8329                                    struct btrfs_root *root,
8330                                    struct btrfs_path *path,
8331                                    struct walk_control *wc, int lookup_info)
8332 {
8333         int level = wc->level;
8334         struct extent_buffer *eb = path->nodes[level];
8335         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8336         int ret;
8337
8338         if (wc->stage == UPDATE_BACKREF &&
8339             btrfs_header_owner(eb) != root->root_key.objectid)
8340                 return 1;
8341
8342         /*
8343          * when the reference count of a tree block is 1, it won't increase
8344          * again. Once the full backref flag is set, we never clear it.
8345          */
8346         if (lookup_info &&
8347             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8348              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8349                 BUG_ON(!path->locks[level]);
8350                 ret = btrfs_lookup_extent_info(trans, root,
8351                                                eb->start, level, 1,
8352                                                &wc->refs[level],
8353                                                &wc->flags[level]);
8354                 BUG_ON(ret == -ENOMEM);
8355                 if (ret)
8356                         return ret;
8357                 BUG_ON(wc->refs[level] == 0);
8358         }
8359
8360         if (wc->stage == DROP_REFERENCE) {
8361                 if (wc->refs[level] > 1)
8362                         return 1;
8363
8364                 if (path->locks[level] && !wc->keep_locks) {
8365                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8366                         path->locks[level] = 0;
8367                 }
8368                 return 0;
8369         }
8370
8371         /* wc->stage == UPDATE_BACKREF */
8372         if (!(wc->flags[level] & flag)) {
8373                 BUG_ON(!path->locks[level]);
8374                 ret = btrfs_inc_ref(trans, root, eb, 1);
8375                 BUG_ON(ret); /* -ENOMEM */
8376                 ret = btrfs_dec_ref(trans, root, eb, 0);
8377                 BUG_ON(ret); /* -ENOMEM */
8378                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8379                                                   eb->len, flag,
8380                                                   btrfs_header_level(eb), 0);
8381                 BUG_ON(ret); /* -ENOMEM */
8382                 wc->flags[level] |= flag;
8383         }
8384
8385         /*
8386          * the block is shared by multiple trees, so it's not good to
8387          * keep the tree lock
8388          */
8389         if (path->locks[level] && level > 0) {
8390                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8391                 path->locks[level] = 0;
8392         }
8393         return 0;
8394 }
8395
8396 /*
8397  * helper to process tree block pointer.
8398  *
8399  * when wc->stage == DROP_REFERENCE, this function checks
8400  * reference count of the block pointed to. if the block
8401  * is shared and we need to update back refs for the subtree
8402  * rooted at the block, this function changes wc->stage to
8403  * UPDATE_BACKREF. if the block is shared and there is no
8404  * need to update back refs, this function drops the reference
8405  * to the block.
8406  *
8407  * NOTE: return value 1 means we should stop walking down.
8408  */
8409 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8410                                  struct btrfs_root *root,
8411                                  struct btrfs_path *path,
8412                                  struct walk_control *wc, int *lookup_info)
8413 {
8414         u64 bytenr;
8415         u64 generation;
8416         u64 parent;
8417         u32 blocksize;
8418         struct btrfs_key key;
8419         struct extent_buffer *next;
8420         int level = wc->level;
8421         int reada = 0;
8422         int ret = 0;
8423         bool need_account = false;
8424
8425         generation = btrfs_node_ptr_generation(path->nodes[level],
8426                                                path->slots[level]);
8427         /*
8428          * if the lower level block was created before the snapshot
8429          * was created, we know there is no need to update back refs
8430          * for the subtree
8431          */
8432         if (wc->stage == UPDATE_BACKREF &&
8433             generation <= root->root_key.offset) {
8434                 *lookup_info = 1;
8435                 return 1;
8436         }
8437
8438         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8439         blocksize = root->nodesize;
8440
8441         next = btrfs_find_tree_block(root->fs_info, bytenr);
8442         if (!next) {
8443                 next = btrfs_find_create_tree_block(root, bytenr);
8444                 if (!next)
8445                         return -ENOMEM;
8446                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8447                                                level - 1);
8448                 reada = 1;
8449         }
8450         btrfs_tree_lock(next);
8451         btrfs_set_lock_blocking(next);
8452
8453         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8454                                        &wc->refs[level - 1],
8455                                        &wc->flags[level - 1]);
8456         if (ret < 0) {
8457                 btrfs_tree_unlock(next);
8458                 return ret;
8459         }
8460
8461         if (unlikely(wc->refs[level - 1] == 0)) {
8462                 btrfs_err(root->fs_info, "Missing references.");
8463                 BUG();
8464         }
8465         *lookup_info = 0;
8466
8467         if (wc->stage == DROP_REFERENCE) {
8468                 if (wc->refs[level - 1] > 1) {
8469                         need_account = true;
8470                         if (level == 1 &&
8471                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8472                                 goto skip;
8473
8474                         if (!wc->update_ref ||
8475                             generation <= root->root_key.offset)
8476                                 goto skip;
8477
8478                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8479                                               path->slots[level]);
8480                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8481                         if (ret < 0)
8482                                 goto skip;
8483
8484                         wc->stage = UPDATE_BACKREF;
8485                         wc->shared_level = level - 1;
8486                 }
8487         } else {
8488                 if (level == 1 &&
8489                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8490                         goto skip;
8491         }
8492
8493         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8494                 btrfs_tree_unlock(next);
8495                 free_extent_buffer(next);
8496                 next = NULL;
8497                 *lookup_info = 1;
8498         }
8499
8500         if (!next) {
8501                 if (reada && level == 1)
8502                         reada_walk_down(trans, root, wc, path);
8503                 next = read_tree_block(root, bytenr, generation);
8504                 if (IS_ERR(next)) {
8505                         return PTR_ERR(next);
8506                 } else if (!extent_buffer_uptodate(next)) {
8507                         free_extent_buffer(next);
8508                         return -EIO;
8509                 }
8510                 btrfs_tree_lock(next);
8511                 btrfs_set_lock_blocking(next);
8512         }
8513
8514         level--;
8515         BUG_ON(level != btrfs_header_level(next));
8516         path->nodes[level] = next;
8517         path->slots[level] = 0;
8518         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8519         wc->level = level;
8520         if (wc->level == 1)
8521                 wc->reada_slot = 0;
8522         return 0;
8523 skip:
8524         wc->refs[level - 1] = 0;
8525         wc->flags[level - 1] = 0;
8526         if (wc->stage == DROP_REFERENCE) {
8527                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8528                         parent = path->nodes[level]->start;
8529                 } else {
8530                         BUG_ON(root->root_key.objectid !=
8531                                btrfs_header_owner(path->nodes[level]));
8532                         parent = 0;
8533                 }
8534
8535                 if (need_account) {
8536                         ret = account_shared_subtree(trans, root, next,
8537                                                      generation, level - 1);
8538                         if (ret) {
8539                                 btrfs_err_rl(root->fs_info,
8540                                         "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8541                                         ret);
8544                         }
8545                 }
8546                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8547                                 root->root_key.objectid, level - 1, 0, 0);
8548                 BUG_ON(ret); /* -ENOMEM */
8549         }
8550         btrfs_tree_unlock(next);
8551         free_extent_buffer(next);
8552         *lookup_info = 1;
8553         return 1;
8554 }
8555
8556 /*
8557  * helper to process tree block while walking up the tree.
8558  *
8559  * when wc->stage == DROP_REFERENCE, this function drops
8560  * reference count on the block.
8561  *
8562  * when wc->stage == UPDATE_BACKREF, this function changes
8563  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8564  * to UPDATE_BACKREF previously while processing the block.
8565  *
8566  * NOTE: return value 1 means we should stop walking up.
8567  */
8568 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8569                                  struct btrfs_root *root,
8570                                  struct btrfs_path *path,
8571                                  struct walk_control *wc)
8572 {
8573         int ret;
8574         int level = wc->level;
8575         struct extent_buffer *eb = path->nodes[level];
8576         u64 parent = 0;
8577
8578         if (wc->stage == UPDATE_BACKREF) {
8579                 BUG_ON(wc->shared_level < level);
8580                 if (level < wc->shared_level)
8581                         goto out;
8582
8583                 ret = find_next_key(path, level + 1, &wc->update_progress);
8584                 if (ret > 0)
8585                         wc->update_ref = 0;
8586
8587                 wc->stage = DROP_REFERENCE;
8588                 wc->shared_level = -1;
8589                 path->slots[level] = 0;
8590
8591                 /*
8592                  * check reference count again if the block isn't locked.
8593                  * we should start walking down the tree again if reference
8594                  * count is one.
8595                  */
8596                 if (!path->locks[level]) {
8597                         BUG_ON(level == 0);
8598                         btrfs_tree_lock(eb);
8599                         btrfs_set_lock_blocking(eb);
8600                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8601
8602                         ret = btrfs_lookup_extent_info(trans, root,
8603                                                        eb->start, level, 1,
8604                                                        &wc->refs[level],
8605                                                        &wc->flags[level]);
8606                         if (ret < 0) {
8607                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8608                                 path->locks[level] = 0;
8609                                 return ret;
8610                         }
8611                         BUG_ON(wc->refs[level] == 0);
8612                         if (wc->refs[level] == 1) {
8613                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8614                                 path->locks[level] = 0;
8615                                 return 1;
8616                         }
8617                 }
8618         }
8619
8620         /* wc->stage == DROP_REFERENCE */
8621         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8622
8623         if (wc->refs[level] == 1) {
8624                 if (level == 0) {
8625                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8626                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8627                         else
8628                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8629                         BUG_ON(ret); /* -ENOMEM */
8630                         ret = account_leaf_items(trans, root, eb);
8631                         if (ret) {
8632                                 btrfs_err_rl(root->fs_info,
8633                                         "error %d accounting leaf items. Quota is out of sync, rescan required.",
8634                                         ret);
8637                         }
8638                 }
8639                 /* make block locked assertion in clean_tree_block happy */
8640                 if (!path->locks[level] &&
8641                     btrfs_header_generation(eb) == trans->transid) {
8642                         btrfs_tree_lock(eb);
8643                         btrfs_set_lock_blocking(eb);
8644                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8645                 }
8646                 clean_tree_block(trans, root->fs_info, eb);
8647         }
8648
8649         if (eb == root->node) {
8650                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8651                         parent = eb->start;
8652                 else
8653                         BUG_ON(root->root_key.objectid !=
8654                                btrfs_header_owner(eb));
8655         } else {
8656                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8657                         parent = path->nodes[level + 1]->start;
8658                 else
8659                         BUG_ON(root->root_key.objectid !=
8660                                btrfs_header_owner(path->nodes[level + 1]));
8661         }
8662
8663         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8664 out:
8665         wc->refs[level] = 0;
8666         wc->flags[level] = 0;
8667         return 0;
8668 }
8669
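/*
 * Walk from wc->level toward the leaves, using walk_down_proc() to
 * process each block and do_walk_down() to descend through child
 * pointers, until a leaf is reached or one of the helpers says stop.
 */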
8670 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8671                                    struct btrfs_root *root,
8672                                    struct btrfs_path *path,
8673                                    struct walk_control *wc)
8674 {
8675         int level = wc->level;
8676         int lookup_info = 1;
8677         int ret;
8678
8679         while (level >= 0) {
8680                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8681                 if (ret > 0)
8682                         break;
8683
8684                 if (level == 0)
8685                         break;
8686
8687                 if (path->slots[level] >=
8688                     btrfs_header_nritems(path->nodes[level]))
8689                         break;
8690
8691                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8692                 if (ret > 0) {
8693                         path->slots[level]++;
8694                         continue;
8695                 } else if (ret < 0)
8696                         return ret;
8697                 level = wc->level;
8698         }
8699         return 0;
8700 }
8701
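/*
 * Walk back up the tree, advancing the slot at each level.  Fully
 * visited blocks are handed to walk_up_proc() and released; returns 0
 * when there is more tree to walk down, 1 when the walk is complete.
 */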
8702 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8703                                  struct btrfs_root *root,
8704                                  struct btrfs_path *path,
8705                                  struct walk_control *wc, int max_level)
8706 {
8707         int level = wc->level;
8708         int ret;
8709
8710         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8711         while (level < max_level && path->nodes[level]) {
8712                 wc->level = level;
8713                 if (path->slots[level] + 1 <
8714                     btrfs_header_nritems(path->nodes[level])) {
8715                         path->slots[level]++;
8716                         return 0;
8717                 } else {
8718                         ret = walk_up_proc(trans, root, path, wc);
8719                         if (ret > 0)
8720                                 return 0;
8721
8722                         if (path->locks[level]) {
8723                                 btrfs_tree_unlock_rw(path->nodes[level],
8724                                                      path->locks[level]);
8725                                 path->locks[level] = 0;
8726                         }
8727                         free_extent_buffer(path->nodes[level]);
8728                         path->nodes[level] = NULL;
8729                         level++;
8730                 }
8731         }
8732         return 1;
8733 }
8734
8735 /*
8736  * drop a subvolume tree.
8737  *
8738  * this function traverses the tree, freeing any blocks that are only
8739  * referenced by the tree.
8740  *
8741  * when a shared tree block is found, this function decreases its
8742  * reference count by one. if update_ref is true, this function
8743  * also makes sure backrefs for the shared block and all lower level
8744  * blocks are properly updated.
8745  *
8746  * If called with for_reloc == 0, may exit early with -EAGAIN
8747  */
8748 int btrfs_drop_snapshot(struct btrfs_root *root,
8749                          struct btrfs_block_rsv *block_rsv, int update_ref,
8750                          int for_reloc)
8751 {
8752         struct btrfs_path *path;
8753         struct btrfs_trans_handle *trans;
8754         struct btrfs_root *tree_root = root->fs_info->tree_root;
8755         struct btrfs_root_item *root_item = &root->root_item;
8756         struct walk_control *wc;
8757         struct btrfs_key key;
8758         int err = 0;
8759         int ret;
8760         int level;
8761         bool root_dropped = false;
8762
8763         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8764
8765         path = btrfs_alloc_path();
8766         if (!path) {
8767                 err = -ENOMEM;
8768                 goto out;
8769         }
8770
8771         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8772         if (!wc) {
8773                 btrfs_free_path(path);
8774                 err = -ENOMEM;
8775                 goto out;
8776         }
8777
8778         trans = btrfs_start_transaction(tree_root, 0);
8779         if (IS_ERR(trans)) {
8780                 err = PTR_ERR(trans);
8781                 goto out_free;
8782         }
8783
8784         if (block_rsv)
8785                 trans->block_rsv = block_rsv;
8786
8787         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8788                 level = btrfs_header_level(root->node);
8789                 path->nodes[level] = btrfs_lock_root_node(root);
8790                 btrfs_set_lock_blocking(path->nodes[level]);
8791                 path->slots[level] = 0;
8792                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8793                 memset(&wc->update_progress, 0,
8794                        sizeof(wc->update_progress));
8795         } else {
8796                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8797                 memcpy(&wc->update_progress, &key,
8798                        sizeof(wc->update_progress));
8799
8800                 level = root_item->drop_level;
8801                 BUG_ON(level == 0);
8802                 path->lowest_level = level;
8803                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8804                 path->lowest_level = 0;
8805                 if (ret < 0) {
8806                         err = ret;
8807                         goto out_end_trans;
8808                 }
8809                 WARN_ON(ret > 0);
8810
8811                 /*
8812                  * unlock our path, this is safe because only this
8813                  * function is allowed to delete this snapshot
8814                  */
8815                 btrfs_unlock_up_safe(path, 0);
8816
8817                 level = btrfs_header_level(root->node);
8818                 while (1) {
8819                         btrfs_tree_lock(path->nodes[level]);
8820                         btrfs_set_lock_blocking(path->nodes[level]);
8821                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8822
8823                         ret = btrfs_lookup_extent_info(trans, root,
8824                                                 path->nodes[level]->start,
8825                                                 level, 1, &wc->refs[level],
8826                                                 &wc->flags[level]);
8827                         if (ret < 0) {
8828                                 err = ret;
8829                                 goto out_end_trans;
8830                         }
8831                         BUG_ON(wc->refs[level] == 0);
8832
8833                         if (level == root_item->drop_level)
8834                                 break;
8835
8836                         btrfs_tree_unlock(path->nodes[level]);
8837                         path->locks[level] = 0;
8838                         WARN_ON(wc->refs[level] != 1);
8839                         level--;
8840                 }
8841         }
8842
8843         wc->level = level;
8844         wc->shared_level = -1;
8845         wc->stage = DROP_REFERENCE;
8846         wc->update_ref = update_ref;
8847         wc->keep_locks = 0;
8848         wc->for_reloc = for_reloc;
8849         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8850
8851         while (1) {
8853                 ret = walk_down_tree(trans, root, path, wc);
8854                 if (ret < 0) {
8855                         err = ret;
8856                         break;
8857                 }
8858
8859                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8860                 if (ret < 0) {
8861                         err = ret;
8862                         break;
8863                 }
8864
8865                 if (ret > 0) {
8866                         BUG_ON(wc->stage != DROP_REFERENCE);
8867                         break;
8868                 }
8869
8870                 if (wc->stage == DROP_REFERENCE) {
8871                         level = wc->level;
8872                         btrfs_node_key(path->nodes[level],
8873                                        &root_item->drop_progress,
8874                                        path->slots[level]);
8875                         root_item->drop_level = level;
8876                 }
8877
8878                 BUG_ON(wc->level == 0);
8879                 if (btrfs_should_end_transaction(trans, tree_root) ||
8880                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8881                         ret = btrfs_update_root(trans, tree_root,
8882                                                 &root->root_key,
8883                                                 root_item);
8884                         if (ret) {
8885                                 btrfs_abort_transaction(trans, tree_root, ret);
8886                                 err = ret;
8887                                 goto out_end_trans;
8888                         }
8889
8890                         btrfs_end_transaction_throttle(trans, tree_root);
8891                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8892                                 pr_debug("BTRFS: drop snapshot early exit\n");
8893                                 err = -EAGAIN;
8894                                 goto out_free;
8895                         }
8896
8897                         trans = btrfs_start_transaction(tree_root, 0);
8898                         if (IS_ERR(trans)) {
8899                                 err = PTR_ERR(trans);
8900                                 goto out_free;
8901                         }
8902                         if (block_rsv)
8903                                 trans->block_rsv = block_rsv;
8904                 }
8905         }
8906         btrfs_release_path(path);
8907         if (err)
8908                 goto out_end_trans;
8909
8910         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8911         if (ret) {
8912                 btrfs_abort_transaction(trans, tree_root, ret);
8913                 goto out_end_trans;
8914         }
8915
8916         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8917                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8918                                       NULL, NULL);
8919                 if (ret < 0) {
8920                         btrfs_abort_transaction(trans, tree_root, ret);
8921                         err = ret;
8922                         goto out_end_trans;
8923                 } else if (ret > 0) {
8924                         /* if we fail to delete the orphan item this time
8925                          * around, it'll get picked up the next time.
8926                          *
8927                          * The most common failure here is just -ENOENT.
8928                          */
8929                         btrfs_del_orphan_item(trans, tree_root,
8930                                               root->root_key.objectid);
8931                 }
8932         }
8933
8934         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8935                 btrfs_add_dropped_root(trans, root);
8936         } else {
8937                 free_extent_buffer(root->node);
8938                 free_extent_buffer(root->commit_root);
8939                 btrfs_put_fs_root(root);
8940         }
8941         root_dropped = true;
8942 out_end_trans:
8943         btrfs_end_transaction_throttle(trans, tree_root);
8944 out_free:
8945         kfree(wc);
8946         btrfs_free_path(path);
8947 out:
8948         /*
8949          * So if we need to stop dropping the snapshot for whatever reason we
8950          * need to make sure to add it back to the dead root list so that we
8951          * keep trying to do the work later.  This also cleans up roots if we
8952          * don't have it in the radix (like when we recover after a power fail
8953          * or unmount) so we don't leak memory.
8954          */
8955         if (!for_reloc && root_dropped == false)
8956                 btrfs_add_dead_root(root);
8957         if (err && err != -EAGAIN)
8958                 btrfs_std_error(root->fs_info, err, NULL);
8959         return err;
8960 }
8961
8962 /*
8963  * drop subtree rooted at tree block 'node'.
8964  *
8965  * NOTE: this function will unlock and release tree block 'node'
8966  * only used by relocation code
8967  */
8968 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8969                         struct btrfs_root *root,
8970                         struct extent_buffer *node,
8971                         struct extent_buffer *parent)
8972 {
8973         struct btrfs_path *path;
8974         struct walk_control *wc;
8975         int level;
8976         int parent_level;
8977         int ret = 0;
8978         int wret;
8979
8980         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8981
8982         path = btrfs_alloc_path();
8983         if (!path)
8984                 return -ENOMEM;
8985
8986         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8987         if (!wc) {
8988                 btrfs_free_path(path);
8989                 return -ENOMEM;
8990         }
8991
8992         btrfs_assert_tree_locked(parent);
8993         parent_level = btrfs_header_level(parent);
8994         extent_buffer_get(parent);
8995         path->nodes[parent_level] = parent;
8996         path->slots[parent_level] = btrfs_header_nritems(parent);
8997
8998         btrfs_assert_tree_locked(node);
8999         level = btrfs_header_level(node);
9000         path->nodes[level] = node;
9001         path->slots[level] = 0;
9002         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9003
9004         wc->refs[parent_level] = 1;
9005         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9006         wc->level = level;
9007         wc->shared_level = -1;
9008         wc->stage = DROP_REFERENCE;
9009         wc->update_ref = 0;
9010         wc->keep_locks = 1;
9011         wc->for_reloc = 1;
9012         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9013
9014         while (1) {
9015                 wret = walk_down_tree(trans, root, path, wc);
9016                 if (wret < 0) {
9017                         ret = wret;
9018                         break;
9019                 }
9020
9021                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9022                 if (wret < 0)
9023                         ret = wret;
9024                 if (wret != 0)
9025                         break;
9026         }
9027
9028         kfree(wc);
9029         btrfs_free_path(path);
9030         return ret;
9031 }
9032
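/*
 * Pick the profile a block group should be relocated to: honor a
 * restripe target if one is set, otherwise degrade redundancy to what
 * the current number of writeable devices can support (e.g. raid1 ->
 * dup on a single device).
 */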
9033 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9034 {
9035         u64 num_devices;
9036         u64 stripped;
9037
9038         /*
9039          * if restripe for this chunk type is on, pick the target profile
9040          * and return; otherwise do the usual balance
9041          */
9042         stripped = get_restripe_target(root->fs_info, flags);
9043         if (stripped)
9044                 return extended_to_chunk(stripped);
9045
9046         num_devices = root->fs_info->fs_devices->rw_devices;
9047
9048         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9049                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9050                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9051
9052         if (num_devices == 1) {
9053                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9054                 stripped = flags & ~stripped;
9055
9056                 /* turn raid0 into single device chunks */
9057                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9058                         return stripped;
9059
9060                 /* turn mirroring into duplication */
9061                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9062                              BTRFS_BLOCK_GROUP_RAID10))
9063                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9064         } else {
9065                 /* they already had raid on here, just return */
9066                 if (flags & stripped)
9067                         return flags;
9068
9069                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9070                 stripped = flags & ~stripped;
9071
9072                 /* switch duplicated blocks with raid1 */
9073                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9074                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9075
9076                 /* this is drive concat, leave it alone */
9077         }
9078
9079         return flags;
9080 }
9081
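/*
 * Mark a block group read-only, or bump its ro count if it already is.
 * Fails with -ENOSPC when the space_info cannot absorb the group's
 * unused bytes; without @force, metadata and system groups must also
 * leave some headroom for chunk allocation.
 */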
9082 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9083 {
9084         struct btrfs_space_info *sinfo = cache->space_info;
9085         u64 num_bytes;
9086         u64 min_allocable_bytes;
9087         int ret = -ENOSPC;
9088
9089         /*
9090          * We need some metadata space and system metadata space for
9091          * allocating chunks in some corner cases, so keep a minimum
9092          * headroom free unless the caller forces read-only.
9093          */
9094         if ((sinfo->flags &
9095              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9096             !force)
9097                 min_allocable_bytes = 1 * 1024 * 1024;
9098         else
9099                 min_allocable_bytes = 0;
9100
9101         spin_lock(&sinfo->lock);
9102         spin_lock(&cache->lock);
9103
9104         if (cache->ro) {
9105                 cache->ro++;
9106                 ret = 0;
9107                 goto out;
9108         }
9109
9110         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9111                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9112
9113         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9114             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9115             min_allocable_bytes <= sinfo->total_bytes) {
9116                 sinfo->bytes_readonly += num_bytes;
9117                 cache->ro++;
9118                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9119                 ret = 0;
9120         }
9121 out:
9122         spin_unlock(&cache->lock);
9123         spin_unlock(&sinfo->lock);
9124         return ret;
9125 }
9126
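/*
 * Set a block group read-only, allocating a replacement chunk first so
 * the space it carries stays available.  If dirty block group writeout
 * for this transaction has already begun, wait for the commit and
 * retry in a new transaction.
 */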
9127 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9128                              struct btrfs_block_group_cache *cache)
9130 {
9131         struct btrfs_trans_handle *trans;
9132         u64 alloc_flags;
9133         int ret;
9134
9135 again:
9136         trans = btrfs_join_transaction(root);
9137         if (IS_ERR(trans))
9138                 return PTR_ERR(trans);
9139
9140         /*
9141          * we're not allowed to set block groups readonly after the dirty
9142          * block groups cache has started writing.  If it already started,
9143          * back off and let this transaction commit
9144          */
9145         mutex_lock(&root->fs_info->ro_block_group_mutex);
9146         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9147                 u64 transid = trans->transid;
9148
9149                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9150                 btrfs_end_transaction(trans, root);
9151
9152                 ret = btrfs_wait_for_commit(root, transid);
9153                 if (ret)
9154                         return ret;
9155                 goto again;
9156         }
9157
9158         /*
9159          * if we are changing raid levels, try to allocate a corresponding
9160          * block group with the new raid level.
9161          */
9162         alloc_flags = update_block_group_flags(root, cache->flags);
9163         if (alloc_flags != cache->flags) {
9164                 ret = do_chunk_alloc(trans, root, alloc_flags,
9165                                      CHUNK_ALLOC_FORCE);
9166                 /*
9167                  * ENOSPC is allowed here, we may have enough space
9168                  * already allocated at the new raid level to
9169                  * carry on
9170                  */
9171                 if (ret == -ENOSPC)
9172                         ret = 0;
9173                 if (ret < 0)
9174                         goto out;
9175         }
9176
9177         ret = inc_block_group_ro(cache, 0);
9178         if (!ret)
9179                 goto out;
9180         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9181         ret = do_chunk_alloc(trans, root, alloc_flags,
9182                              CHUNK_ALLOC_FORCE);
9183         if (ret < 0)
9184                 goto out;
9185         ret = inc_block_group_ro(cache, 0);
9186 out:
9187         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9188                 alloc_flags = update_block_group_flags(root, cache->flags);
9189                 lock_chunks(root->fs_info->chunk_root);
9190                 check_system_chunk(trans, root, alloc_flags);
9191                 unlock_chunks(root->fs_info->chunk_root);
9192         }
9193         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9194
9195         btrfs_end_transaction(trans, root);
9196         return ret;
9197 }
9198
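/* force the allocation of a new chunk with the profile for @type */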
9199 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9200                             struct btrfs_root *root, u64 type)
9201 {
9202         u64 alloc_flags = get_alloc_profile(root, type);
9203         return do_chunk_alloc(trans, root, alloc_flags,
9204                               CHUNK_ALLOC_FORCE);
9205 }
9206
9207 /*
9208  * helper to account the unused space of all the readonly block groups in the
9209  * space_info. takes mirrors into account.
9210  */
9211 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9212 {
9213         struct btrfs_block_group_cache *block_group;
9214         u64 free_bytes = 0;
9215         int factor;
9216
9217         /* It's df, we don't care if it's racy */
9218         if (list_empty(&sinfo->ro_bgs))
9219                 return 0;
9220
9221         spin_lock(&sinfo->lock);
9222         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9223                 spin_lock(&block_group->lock);
9224
9225                 if (!block_group->ro) {
9226                         spin_unlock(&block_group->lock);
9227                         continue;
9228                 }
9229
9230                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9231                                           BTRFS_BLOCK_GROUP_RAID10 |
9232                                           BTRFS_BLOCK_GROUP_DUP))
9233                         factor = 2;
9234                 else
9235                         factor = 1;
9236
9237                 free_bytes += (block_group->key.offset -
9238                                btrfs_block_group_used(&block_group->item)) *
9239                                factor;
9240
9241                 spin_unlock(&block_group->lock);
9242         }
9243         spin_unlock(&sinfo->lock);
9244
9245         return free_bytes;
9246 }
9247
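/*
 * Drop one read-only reference on a block group; when the last one
 * goes away, return its unused bytes to the space_info's writable
 * counters and take it off the ro_bgs list.
 */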
9248 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9249                               struct btrfs_block_group_cache *cache)
9250 {
9251         struct btrfs_space_info *sinfo = cache->space_info;
9252         u64 num_bytes;
9253
9254         BUG_ON(!cache->ro);
9255
9256         spin_lock(&sinfo->lock);
9257         spin_lock(&cache->lock);
9258         if (!--cache->ro) {
9259                 num_bytes = cache->key.offset - cache->reserved -
9260                             cache->pinned - cache->bytes_super -
9261                             btrfs_block_group_used(&cache->item);
9262                 sinfo->bytes_readonly -= num_bytes;
9263                 list_del_init(&cache->ro_list);
9264         }
9265         spin_unlock(&cache->lock);
9266         spin_unlock(&sinfo->lock);
9267 }
9268
9269 /*
9270  * Checks to see if it's even possible to relocate this block group.
9271  *
9272  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9273  * ok to go ahead and try.
9274  */
9275 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9276 {
9277         struct btrfs_block_group_cache *block_group;
9278         struct btrfs_space_info *space_info;
9279         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9280         struct btrfs_device *device;
9281         struct btrfs_trans_handle *trans;
9282         u64 min_free;
9283         u64 dev_min = 1;
9284         u64 dev_nr = 0;
9285         u64 target;
9286         int index;
9287         int full = 0;
9288         int ret = 0;
9289
9290         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9291
9292         /* odd, couldn't find the block group, leave it alone */
9293         if (!block_group)
9294                 return -1;
9295
9296         min_free = btrfs_block_group_used(&block_group->item);
9297
9298         /* no bytes used, we're good */
9299         if (!min_free)
9300                 goto out;
9301
9302         space_info = block_group->space_info;
9303         spin_lock(&space_info->lock);
9304
9305         full = space_info->full;
9306
9307         /*
9308          * if this is the last block group we have in this space, we can't
9309          * relocate it unless we're able to allocate a new chunk below.
9310          *
9311          * Otherwise, we need to make sure we have room in the space to handle
9312          * all of the extents from this block group.  If we can, we're good.
9313          */
9314         if ((space_info->total_bytes != block_group->key.offset) &&
9315             (space_info->bytes_used + space_info->bytes_reserved +
9316              space_info->bytes_pinned + space_info->bytes_readonly +
9317              min_free < space_info->total_bytes)) {
9318                 spin_unlock(&space_info->lock);
9319                 goto out;
9320         }
9321         spin_unlock(&space_info->lock);
9322
9323         /*
9324          * OK, we don't have enough space, but maybe we have free space on our
9325          * devices to allocate new chunks for relocation, so loop through our
9326          * alloc devices and guess if we have enough space.  If this block
9327          * group is going to be restriped, run checks against the target
9328          * profile instead of the current one.
9329          */
9330         ret = -1;
9331
9332         /*
9333          * index:
9334          *      0: raid10
9335          *      1: raid1
9336          *      2: dup
9337          *      3: raid0
9338          *      4: single
9339          */
9340         target = get_restripe_target(root->fs_info, block_group->flags);
9341         if (target) {
9342                 index = __get_raid_index(extended_to_chunk(target));
9343         } else {
9344                 /*
9345                  * this is just a balance, so if we were marked as full
9346                  * we know there is no space for a new chunk
9347                  */
9348                 if (full)
9349                         goto out;
9350
9351                 index = get_block_group_index(block_group);
9352         }
9353
9354         if (index == BTRFS_RAID_RAID10) {
9355                 dev_min = 4;
9356                 /* Divide by 2 */
9357                 min_free >>= 1;
9358         } else if (index == BTRFS_RAID_RAID1) {
9359                 dev_min = 2;
9360         } else if (index == BTRFS_RAID_DUP) {
9361                 /* Multiply by 2 */
9362                 min_free <<= 1;
9363         } else if (index == BTRFS_RAID_RAID0) {
9364                 dev_min = fs_devices->rw_devices;
9365                 min_free = div64_u64(min_free, dev_min);
9366         }
9367
9368         /* We need to do this so that we can look at pending chunks */
9369         trans = btrfs_join_transaction(root);
9370         if (IS_ERR(trans)) {
9371                 ret = PTR_ERR(trans);
9372                 goto out;
9373         }
9374
9375         mutex_lock(&root->fs_info->chunk_mutex);
9376         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9377                 u64 dev_offset;
9378
9379                 /*
9380                  * check to make sure we can actually find a chunk with enough
9381                  * space to fit our block group in.
9382                  */
9383                 if (device->total_bytes > device->bytes_used + min_free &&
9384                     !device->is_tgtdev_for_dev_replace) {
9385                         ret = find_free_dev_extent(trans, device, min_free,
9386                                                    &dev_offset, NULL);
9387                         if (!ret)
9388                                 dev_nr++;
9389
9390                         if (dev_nr >= dev_min)
9391                                 break;
9392
9393                         ret = -1;
9394                 }
9395         }
9396         mutex_unlock(&root->fs_info->chunk_mutex);
9397         btrfs_end_transaction(trans, root);
9398 out:
9399         btrfs_put_block_group(block_group);
9400         return ret;
9401 }
9402
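/*
 * Find the first BTRFS_BLOCK_GROUP_ITEM_KEY item at or after @key in the
 * extent tree.  Returns 0 with @path pointing at the item, a positive
 * value if no such item exists, or a negative errno on error.
 */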
9403 static int find_first_block_group(struct btrfs_root *root,
9404                 struct btrfs_path *path, struct btrfs_key *key)
9405 {
9406         int ret = 0;
9407         struct btrfs_key found_key;
9408         struct extent_buffer *leaf;
9409         int slot;
9410
9411         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9412         if (ret < 0)
9413                 goto out;
9414
9415         while (1) {
9416                 slot = path->slots[0];
9417                 leaf = path->nodes[0];
9418                 if (slot >= btrfs_header_nritems(leaf)) {
9419                         ret = btrfs_next_leaf(root, path);
9420                         if (ret == 0)
9421                                 continue;
9422                         if (ret < 0)
9423                                 goto out;
9424                         break;
9425                 }
9426                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9427
9428                 if (found_key.objectid >= key->objectid &&
9429                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9430                         ret = 0;
9431                         goto out;
9432                 }
9433                 path->slots[0]++;
9434         }
9435 out:
9436         return ret;
9437 }
9438
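/*
 * Drop the inode reference (iref) that each block group may still hold on
 * its free space cache inode; used while tearing down the filesystem.
 */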
9439 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9440 {
9441         struct btrfs_block_group_cache *block_group;
9442         u64 last = 0;
9443
9444         while (1) {
9445                 struct inode *inode;
9446
9447                 block_group = btrfs_lookup_first_block_group(info, last);
9448                 while (block_group) {
9449                         spin_lock(&block_group->lock);
9450                         if (block_group->iref)
9451                                 break;
9452                         spin_unlock(&block_group->lock);
9453                         block_group = next_block_group(info->tree_root,
9454                                                        block_group);
9455                 }
9456                 if (!block_group) {
9457                         if (last == 0)
9458                                 break;
9459                         last = 0;
9460                         continue;
9461                 }
9462
9463                 inode = block_group->inode;
9464                 block_group->iref = 0;
9465                 block_group->inode = NULL;
9466                 spin_unlock(&block_group->lock);
9467                 iput(inode);
9468                 last = block_group->key.objectid + block_group->key.offset;
9469                 btrfs_put_block_group(block_group);
9470         }
9471 }
9472
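/*
 * Tear down all in-memory block group state: pending caching controls,
 * the unused_bgs list, every block group in the rbtree and finally the
 * space_info structures themselves.
 */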
9473 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9474 {
9475         struct btrfs_block_group_cache *block_group;
9476         struct btrfs_space_info *space_info;
9477         struct btrfs_caching_control *caching_ctl;
9478         struct rb_node *n;
9479
9480         down_write(&info->commit_root_sem);
9481         while (!list_empty(&info->caching_block_groups)) {
9482                 caching_ctl = list_entry(info->caching_block_groups.next,
9483                                          struct btrfs_caching_control, list);
9484                 list_del(&caching_ctl->list);
9485                 put_caching_control(caching_ctl);
9486         }
9487         up_write(&info->commit_root_sem);
9488
9489         spin_lock(&info->unused_bgs_lock);
9490         while (!list_empty(&info->unused_bgs)) {
9491                 block_group = list_first_entry(&info->unused_bgs,
9492                                                struct btrfs_block_group_cache,
9493                                                bg_list);
9494                 list_del_init(&block_group->bg_list);
9495                 btrfs_put_block_group(block_group);
9496         }
9497         spin_unlock(&info->unused_bgs_lock);
9498
9499         spin_lock(&info->block_group_cache_lock);
9500         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9501                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9502                                        cache_node);
9503                 rb_erase(&block_group->cache_node,
9504                          &info->block_group_cache_tree);
9505                 RB_CLEAR_NODE(&block_group->cache_node);
9506                 spin_unlock(&info->block_group_cache_lock);
9507
9508                 down_write(&block_group->space_info->groups_sem);
9509                 list_del(&block_group->list);
9510                 up_write(&block_group->space_info->groups_sem);
9511
9512                 if (block_group->cached == BTRFS_CACHE_STARTED)
9513                         wait_block_group_cache_done(block_group);
9514
9515                 /*
9516                  * We haven't cached this block group, which means we could
9517                  * possibly have excluded extents on this block group.
9518                  */
9519                 if (block_group->cached == BTRFS_CACHE_NO ||
9520                     block_group->cached == BTRFS_CACHE_ERROR)
9521                         free_excluded_extents(info->extent_root, block_group);
9522
9523                 btrfs_remove_free_space_cache(block_group);
9524                 btrfs_put_block_group(block_group);
9525
9526                 spin_lock(&info->block_group_cache_lock);
9527         }
9528         spin_unlock(&info->block_group_cache_lock);
9529
9530         /* now that all the block groups are freed, go through and
9531          * free all the space_info structs.  This is only called during
9532          * the final stages of unmount, and so we know nobody is
9533          * using them.  We call synchronize_rcu() once before we start,
9534          * just to be on the safe side.
9535          */
9536         synchronize_rcu();
9537
9538         release_global_block_rsv(info);
9539
9540         while (!list_empty(&info->space_info)) {
9541                 int i;
9542
9543                 space_info = list_entry(info->space_info.next,
9544                                         struct btrfs_space_info,
9545                                         list);
9546                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9547                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9548                             space_info->bytes_reserved > 0 ||
9549                             space_info->bytes_may_use > 0)) {
9550                                 dump_space_info(space_info, 0, 0);
9551                         }
9552                 }
9553                 list_del(&space_info->list);
9554                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9555                         struct kobject *kobj;
9556                         kobj = space_info->block_group_kobjs[i];
9557                         space_info->block_group_kobjs[i] = NULL;
9558                         if (kobj) {
9559                                 kobject_del(kobj);
9560                                 kobject_put(kobj);
9561                         }
9562                 }
9563                 kobject_del(&space_info->kobj);
9564                 kobject_put(&space_info->kobj);
9565         }
9566         return 0;
9567 }
9568
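/*
 * Add @cache to the tail of its space_info's list for the cache's raid
 * level.  The first block group of a given level also gets the matching
 * sysfs raid kobject created for it.
 */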
9569 static void __link_block_group(struct btrfs_space_info *space_info,
9570                                struct btrfs_block_group_cache *cache)
9571 {
9572         int index = get_block_group_index(cache);
9573         bool first = false;
9574
9575         down_write(&space_info->groups_sem);
9576         if (list_empty(&space_info->block_groups[index]))
9577                 first = true;
9578         list_add_tail(&cache->list, &space_info->block_groups[index]);
9579         up_write(&space_info->groups_sem);
9580
9581         if (first) {
9582                 struct raid_kobject *rkobj;
9583                 int ret;
9584
9585                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9586                 if (!rkobj)
9587                         goto out_err;
9588                 rkobj->raid_type = index;
9589                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9590                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9591                                   "%s", get_raid_name(index));
9592                 if (ret) {
9593                         kobject_put(&rkobj->kobj);
9594                         goto out_err;
9595                 }
9596                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9597         }
9598
9599         return;
9600 out_err:
9601         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9602 }
9603
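/*
 * Allocate and initialize an in-memory block group cache for the chunk
 * starting at @start with length @size.  Returns NULL on allocation
 * failure; the caller owns the initial reference.
 */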
9604 static struct btrfs_block_group_cache *
9605 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9606 {
9607         struct btrfs_block_group_cache *cache;
9608
9609         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9610         if (!cache)
9611                 return NULL;
9612
9613         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9614                                         GFP_NOFS);
9615         if (!cache->free_space_ctl) {
9616                 kfree(cache);
9617                 return NULL;
9618         }
9619
9620         cache->key.objectid = start;
9621         cache->key.offset = size;
9622         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9623
9624         cache->sectorsize = root->sectorsize;
9625         cache->fs_info = root->fs_info;
9626         cache->full_stripe_len = btrfs_full_stripe_len(root,
9627                                                &root->fs_info->mapping_tree,
9628                                                start);
9629         atomic_set(&cache->count, 1);
9630         spin_lock_init(&cache->lock);
9631         init_rwsem(&cache->data_rwsem);
9632         INIT_LIST_HEAD(&cache->list);
9633         INIT_LIST_HEAD(&cache->cluster_list);
9634         INIT_LIST_HEAD(&cache->bg_list);
9635         INIT_LIST_HEAD(&cache->ro_list);
9636         INIT_LIST_HEAD(&cache->dirty_list);
9637         INIT_LIST_HEAD(&cache->io_list);
9638         btrfs_init_free_space_ctl(cache);
9639         atomic_set(&cache->trimming, 0);
9640
9641         return cache;
9642 }
9643
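/*
 * At mount time, walk every block group item in the extent tree and build
 * the corresponding in-memory caches, linking each one to its space_info.
 * Unmirrored (raid0/single) groups are made read-only when mirrored
 * profiles exist, and empty groups are queued on unused_bgs for removal.
 */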
9644 int btrfs_read_block_groups(struct btrfs_root *root)
9645 {
9646         struct btrfs_path *path;
9647         int ret;
9648         struct btrfs_block_group_cache *cache;
9649         struct btrfs_fs_info *info = root->fs_info;
9650         struct btrfs_space_info *space_info;
9651         struct btrfs_key key;
9652         struct btrfs_key found_key;
9653         struct extent_buffer *leaf;
9654         int need_clear = 0;
9655         u64 cache_gen;
9656
9657         root = info->extent_root;
9658         key.objectid = 0;
9659         key.offset = 0;
9660         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9661         path = btrfs_alloc_path();
9662         if (!path)
9663                 return -ENOMEM;
9664         path->reada = 1;
9665
9666         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9667         if (btrfs_test_opt(root, SPACE_CACHE) &&
9668             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9669                 need_clear = 1;
9670         if (btrfs_test_opt(root, CLEAR_CACHE))
9671                 need_clear = 1;
9672
9673         while (1) {
9674                 ret = find_first_block_group(root, path, &key);
9675                 if (ret > 0)
9676                         break;
9677                 if (ret != 0)
9678                         goto error;
9679
9680                 leaf = path->nodes[0];
9681                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9682
9683                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9684                                                        found_key.offset);
9685                 if (!cache) {
9686                         ret = -ENOMEM;
9687                         goto error;
9688                 }
9689
9690                 if (need_clear) {
9691                         /*
9692                          * When we mount with an old space cache, we need to
9693                          * set BTRFS_DC_CLEAR and set the dirty flag.
9694                          *
9695                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9696                          *    truncate the old free space cache inode and
9697                          *    set up a new one.
9698                          * b) Setting the dirty flag makes sure that we flush
9699                          *    the new space cache info onto disk.
9700                          */
9701                         if (btrfs_test_opt(root, SPACE_CACHE))
9702                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9703                 }
9704
9705                 read_extent_buffer(leaf, &cache->item,
9706                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9707                                    sizeof(cache->item));
9708                 cache->flags = btrfs_block_group_flags(&cache->item);
9709
9710                 key.objectid = found_key.objectid + found_key.offset;
9711                 btrfs_release_path(path);
9712
9713                 /*
9714                  * We need to exclude the super stripes now so that the space
9715                  * info has super bytes accounted for, otherwise we'll think
9716                  * we have more space than we actually do.
9717                  */
9718                 ret = exclude_super_stripes(root, cache);
9719                 if (ret) {
9720                         /*
9721                          * We may have excluded something, so call this just in
9722                          * case.
9723                          */
9724                         free_excluded_extents(root, cache);
9725                         btrfs_put_block_group(cache);
9726                         goto error;
9727                 }
9728
9729                 /*
9730                  * Check for two cases: either we are full, and therefore
9731                  * don't need to bother with the caching work since we won't
9732                  * find any space, or we are empty, and we can just add all
9733                  * the space in and be done with it.  This saves us a lot of
9734                  * time, particularly in the full case.
9735                  */
9736                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9737                         cache->last_byte_to_unpin = (u64)-1;
9738                         cache->cached = BTRFS_CACHE_FINISHED;
9739                         free_excluded_extents(root, cache);
9740                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9741                         cache->last_byte_to_unpin = (u64)-1;
9742                         cache->cached = BTRFS_CACHE_FINISHED;
9743                         add_new_free_space(cache, root->fs_info,
9744                                            found_key.objectid,
9745                                            found_key.objectid +
9746                                            found_key.offset);
9747                         free_excluded_extents(root, cache);
9748                 }
9749
9750                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9751                 if (ret) {
9752                         btrfs_remove_free_space_cache(cache);
9753                         btrfs_put_block_group(cache);
9754                         goto error;
9755                 }
9756
9757                 ret = update_space_info(info, cache->flags, found_key.offset,
9758                                         btrfs_block_group_used(&cache->item),
9759                                         &space_info);
9760                 if (ret) {
9761                         btrfs_remove_free_space_cache(cache);
9762                         spin_lock(&info->block_group_cache_lock);
9763                         rb_erase(&cache->cache_node,
9764                                  &info->block_group_cache_tree);
9765                         RB_CLEAR_NODE(&cache->cache_node);
9766                         spin_unlock(&info->block_group_cache_lock);
9767                         btrfs_put_block_group(cache);
9768                         goto error;
9769                 }
9770
9771                 cache->space_info = space_info;
9772                 spin_lock(&cache->space_info->lock);
9773                 cache->space_info->bytes_readonly += cache->bytes_super;
9774                 spin_unlock(&cache->space_info->lock);
9775
9776                 __link_block_group(space_info, cache);
9777
9778                 set_avail_alloc_bits(root->fs_info, cache->flags);
9779                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9780                         inc_block_group_ro(cache, 1);
9781                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9782                         spin_lock(&info->unused_bgs_lock);
9783                         /* Should always be true but just in case. */
9784                         if (list_empty(&cache->bg_list)) {
9785                                 btrfs_get_block_group(cache);
9786                                 list_add_tail(&cache->bg_list,
9787                                               &info->unused_bgs);
9788                         }
9789                         spin_unlock(&info->unused_bgs_lock);
9790                 }
9791         }
9792
9793         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9794                 if (!(get_alloc_profile(root, space_info->flags) &
9795                       (BTRFS_BLOCK_GROUP_RAID10 |
9796                        BTRFS_BLOCK_GROUP_RAID1 |
9797                        BTRFS_BLOCK_GROUP_RAID5 |
9798                        BTRFS_BLOCK_GROUP_RAID6 |
9799                        BTRFS_BLOCK_GROUP_DUP)))
9800                         continue;
9801                 /*
9802                  * Avoid allocating from un-mirrored block groups if there are
9803                  * mirrored block groups.
9804                  */
9805                 list_for_each_entry(cache,
9806                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9807                                 list)
9808                         inc_block_group_ro(cache, 1);
9809                 list_for_each_entry(cache,
9810                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9811                                 list)
9812                         inc_block_group_ro(cache, 1);
9813         }
9814
9815         init_global_block_rsv(info);
9816         ret = 0;
9817 error:
9818         btrfs_free_path(path);
9819         return ret;
9820 }
9821
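/*
 * Insert the block group items for the block groups created during this
 * transaction (trans->new_bgs) into the extent tree and finish their
 * chunk allocation.  can_flush_pending_bgs is cleared for the duration so
 * that a nested chunk allocation can't re-enter this function.
 */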
9822 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9823                                        struct btrfs_root *root)
9824 {
9825         struct btrfs_block_group_cache *block_group, *tmp;
9826         struct btrfs_root *extent_root = root->fs_info->extent_root;
9827         struct btrfs_block_group_item item;
9828         struct btrfs_key key;
9829         int ret = 0;
9830         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9831
9832         trans->can_flush_pending_bgs = false;
9833         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9834                 if (ret)
9835                         goto next;
9836
9837                 spin_lock(&block_group->lock);
9838                 memcpy(&item, &block_group->item, sizeof(item));
9839                 memcpy(&key, &block_group->key, sizeof(key));
9840                 spin_unlock(&block_group->lock);
9841
9842                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9843                                         sizeof(item));
9844                 if (ret)
9845                         btrfs_abort_transaction(trans, extent_root, ret);
9846                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9847                                                key.objectid, key.offset);
9848                 if (ret)
9849                         btrfs_abort_transaction(trans, extent_root, ret);
9850 next:
9851                 list_del_init(&block_group->bg_list);
9852         }
9853         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9854 }
9855
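/*
 * Create a new block group for the chunk at @chunk_offset/@size: build the
 * in-memory cache, insert it into the rbtree and its space_info, and queue
 * it on trans->new_bgs so its item is inserted into the extent tree by
 * btrfs_create_pending_block_groups().
 */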
9856 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9857                            struct btrfs_root *root, u64 bytes_used,
9858                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9859                            u64 size)
9860 {
9861         int ret;
9862         struct btrfs_root *extent_root;
9863         struct btrfs_block_group_cache *cache;
9864
9865         extent_root = root->fs_info->extent_root;
9866
9867         btrfs_set_log_full_commit(root->fs_info, trans);
9868
9869         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9870         if (!cache)
9871                 return -ENOMEM;
9872
9873         btrfs_set_block_group_used(&cache->item, bytes_used);
9874         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9875         btrfs_set_block_group_flags(&cache->item, type);
9876
9877         cache->flags = type;
9878         cache->last_byte_to_unpin = (u64)-1;
9879         cache->cached = BTRFS_CACHE_FINISHED;
9880         ret = exclude_super_stripes(root, cache);
9881         if (ret) {
9882                 /*
9883                  * We may have excluded something, so call this just in
9884                  * case.
9885                  */
9886                 free_excluded_extents(root, cache);
9887                 btrfs_put_block_group(cache);
9888                 return ret;
9889         }
9890
9891         add_new_free_space(cache, root->fs_info, chunk_offset,
9892                            chunk_offset + size);
9893
9894         free_excluded_extents(root, cache);
9895
9896 #ifdef CONFIG_BTRFS_DEBUG
9897         if (btrfs_should_fragment_free_space(root, cache)) {
9898                 u64 new_bytes_used = size - bytes_used;
9899
9900                 bytes_used += new_bytes_used >> 1;
9901                 fragment_free_space(root, cache);
9902         }
9903 #endif
9904         /*
9905          * Call to ensure the corresponding space_info object is created and
9906          * assigned to our block group, but don't update its counters just yet.
9907          * We want our bg to be added to the rbtree with its ->space_info set.
9908          */
9909         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9910                                 &cache->space_info);
9911         if (ret) {
9912                 btrfs_remove_free_space_cache(cache);
9913                 btrfs_put_block_group(cache);
9914                 return ret;
9915         }
9916
9917         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9918         if (ret) {
9919                 btrfs_remove_free_space_cache(cache);
9920                 btrfs_put_block_group(cache);
9921                 return ret;
9922         }
9923
9924         /*
9925          * Now that our block group has its ->space_info set and is inserted in
9926          * the rbtree, update the space info's counters.
9927          */
9928         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9929                                 &cache->space_info);
9930         if (ret) {
9931                 btrfs_remove_free_space_cache(cache);
9932                 spin_lock(&root->fs_info->block_group_cache_lock);
9933                 rb_erase(&cache->cache_node,
9934                          &root->fs_info->block_group_cache_tree);
9935                 RB_CLEAR_NODE(&cache->cache_node);
9936                 spin_unlock(&root->fs_info->block_group_cache_lock);
9937                 btrfs_put_block_group(cache);
9938                 return ret;
9939         }
9940         update_global_block_rsv(root->fs_info);
9941
9942         spin_lock(&cache->space_info->lock);
9943         cache->space_info->bytes_readonly += cache->bytes_super;
9944         spin_unlock(&cache->space_info->lock);
9945
9946         __link_block_group(cache->space_info, cache);
9947
9948         list_add_tail(&cache->bg_list, &trans->new_bgs);
9949
9950         set_avail_alloc_bits(extent_root->fs_info, type);
9951
9952         return 0;
9953 }
9954
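/*
 * Clear the extended profile bits of @flags from the per-type available
 * allocation bits; the inverse of set_avail_alloc_bits().
 */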
9955 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9956 {
9957         u64 extra_flags = chunk_to_extended(flags) &
9958                                 BTRFS_EXTENDED_PROFILE_MASK;
9959
9960         write_seqlock(&fs_info->profiles_lock);
9961         if (flags & BTRFS_BLOCK_GROUP_DATA)
9962                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9963         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9964                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9965         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9966                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9967         write_sequnlock(&fs_info->profiles_lock);
9968 }
9969
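/*
 * Delete the given block group: drop its free space cache inode, unlink it
 * from the rbtree, the per-space_info lists and sysfs, adjust the
 * space_info counters and delete its item from the extent tree.  The
 * chunk's extent map is kept pinned until any pending trim has finished.
 */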
9970 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9971                              struct btrfs_root *root, u64 group_start,
9972                              struct extent_map *em)
9973 {
9974         struct btrfs_path *path;
9975         struct btrfs_block_group_cache *block_group;
9976         struct btrfs_free_cluster *cluster;
9977         struct btrfs_root *tree_root = root->fs_info->tree_root;
9978         struct btrfs_key key;
9979         struct inode *inode;
9980         struct kobject *kobj = NULL;
9981         int ret;
9982         int index;
9983         int factor;
9984         struct btrfs_caching_control *caching_ctl = NULL;
9985         bool remove_em;
9986
9987         root = root->fs_info->extent_root;
9988
9989         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9990         BUG_ON(!block_group);
9991         BUG_ON(!block_group->ro);
9992
9993         /*
9994          * Free the reserved super bytes from this block group before
9995          * removing it.
9996          */
9997         free_excluded_extents(root, block_group);
9998
9999         memcpy(&key, &block_group->key, sizeof(key));
10000         index = get_block_group_index(block_group);
10001         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10002                                   BTRFS_BLOCK_GROUP_RAID1 |
10003                                   BTRFS_BLOCK_GROUP_RAID10))
10004                 factor = 2;
10005         else
10006                 factor = 1;
10007
10008         /* make sure this block group isn't part of an allocation cluster */
10009         cluster = &root->fs_info->data_alloc_cluster;
10010         spin_lock(&cluster->refill_lock);
10011         btrfs_return_cluster_to_free_space(block_group, cluster);
10012         spin_unlock(&cluster->refill_lock);
10013
10014         /*
10015          * make sure this block group isn't part of a metadata
10016          * allocation cluster
10017          */
10018         cluster = &root->fs_info->meta_alloc_cluster;
10019         spin_lock(&cluster->refill_lock);
10020         btrfs_return_cluster_to_free_space(block_group, cluster);
10021         spin_unlock(&cluster->refill_lock);
10022
10023         path = btrfs_alloc_path();
10024         if (!path) {
10025                 ret = -ENOMEM;
10026                 goto out;
10027         }
10028
10029         /*
10030          * get the inode first so any iput calls done for the io_list
10031          * aren't the final iput (no unlinks allowed now)
10032          */
10033         inode = lookup_free_space_inode(tree_root, block_group, path);
10034
10035         mutex_lock(&trans->transaction->cache_write_mutex);
10036         /*
10037          * Make sure our free space cache IO is done before removing the
10038          * free space inode.
10039          */
10040         spin_lock(&trans->transaction->dirty_bgs_lock);
10041         if (!list_empty(&block_group->io_list)) {
10042                 list_del_init(&block_group->io_list);
10043
10044                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10045
10046                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10047                 btrfs_wait_cache_io(root, trans, block_group,
10048                                     &block_group->io_ctl, path,
10049                                     block_group->key.objectid);
10050                 btrfs_put_block_group(block_group);
10051                 spin_lock(&trans->transaction->dirty_bgs_lock);
10052         }
10053
10054         if (!list_empty(&block_group->dirty_list)) {
10055                 list_del_init(&block_group->dirty_list);
10056                 btrfs_put_block_group(block_group);
10057         }
10058         spin_unlock(&trans->transaction->dirty_bgs_lock);
10059         mutex_unlock(&trans->transaction->cache_write_mutex);
10060
10061         if (!IS_ERR(inode)) {
10062                 ret = btrfs_orphan_add(trans, inode);
10063                 if (ret) {
10064                         btrfs_add_delayed_iput(inode);
10065                         goto out;
10066                 }
10067                 clear_nlink(inode);
10068                 /* One for the block groups ref */
10069                 spin_lock(&block_group->lock);
10070                 if (block_group->iref) {
10071                         block_group->iref = 0;
10072                         block_group->inode = NULL;
10073                         spin_unlock(&block_group->lock);
10074                         iput(inode);
10075                 } else {
10076                         spin_unlock(&block_group->lock);
10077                 }
10078                 /* One for our lookup ref */
10079                 btrfs_add_delayed_iput(inode);
10080         }
10081
10082         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10083         key.offset = block_group->key.objectid;
10084         key.type = 0;
10085
10086         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10087         if (ret < 0)
10088                 goto out;
10089         if (ret > 0)
10090                 btrfs_release_path(path);
10091         if (ret == 0) {
10092                 ret = btrfs_del_item(trans, tree_root, path);
10093                 if (ret)
10094                         goto out;
10095                 btrfs_release_path(path);
10096         }
10097
10098         spin_lock(&root->fs_info->block_group_cache_lock);
10099         rb_erase(&block_group->cache_node,
10100                  &root->fs_info->block_group_cache_tree);
10101         RB_CLEAR_NODE(&block_group->cache_node);
10102
10103         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10104                 root->fs_info->first_logical_byte = (u64)-1;
10105         spin_unlock(&root->fs_info->block_group_cache_lock);
10106
10107         down_write(&block_group->space_info->groups_sem);
10108         /*
10109          * we must use list_del_init so people can check to see if they
10110          * are still on the list after taking the semaphore
10111          */
10112         list_del_init(&block_group->list);
10113         if (list_empty(&block_group->space_info->block_groups[index])) {
10114                 kobj = block_group->space_info->block_group_kobjs[index];
10115                 block_group->space_info->block_group_kobjs[index] = NULL;
10116                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10117         }
10118         up_write(&block_group->space_info->groups_sem);
10119         if (kobj) {
10120                 kobject_del(kobj);
10121                 kobject_put(kobj);
10122         }
10123
10124         if (block_group->has_caching_ctl)
10125                 caching_ctl = get_caching_control(block_group);
10126         if (block_group->cached == BTRFS_CACHE_STARTED)
10127                 wait_block_group_cache_done(block_group);
10128         if (block_group->has_caching_ctl) {
10129                 down_write(&root->fs_info->commit_root_sem);
10130                 if (!caching_ctl) {
10131                         struct btrfs_caching_control *ctl;
10132
10133                         list_for_each_entry(ctl,
10134                                     &root->fs_info->caching_block_groups, list)
10135                                 if (ctl->block_group == block_group) {
10136                                         caching_ctl = ctl;
10137                                         atomic_inc(&caching_ctl->count);
10138                                         break;
10139                                 }
10140                 }
10141                 if (caching_ctl)
10142                         list_del_init(&caching_ctl->list);
10143                 up_write(&root->fs_info->commit_root_sem);
10144                 if (caching_ctl) {
10145                         /* Once for the caching bgs list and once for us. */
10146                         put_caching_control(caching_ctl);
10147                         put_caching_control(caching_ctl);
10148                 }
10149         }
10150
10151         spin_lock(&trans->transaction->dirty_bgs_lock);
10152         WARN_ON(!list_empty(&block_group->dirty_list));
10153         WARN_ON(!list_empty(&block_group->io_list));
10158         spin_unlock(&trans->transaction->dirty_bgs_lock);
10159         btrfs_remove_free_space_cache(block_group);
10160
10161         spin_lock(&block_group->space_info->lock);
10162         list_del_init(&block_group->ro_list);
10163
10164         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10165                 WARN_ON(block_group->space_info->total_bytes
10166                         < block_group->key.offset);
10167                 WARN_ON(block_group->space_info->bytes_readonly
10168                         < block_group->key.offset);
10169                 WARN_ON(block_group->space_info->disk_total
10170                         < block_group->key.offset * factor);
10171         }
10172         block_group->space_info->total_bytes -= block_group->key.offset;
10173         block_group->space_info->bytes_readonly -= block_group->key.offset;
10174         block_group->space_info->disk_total -= block_group->key.offset * factor;
10175
10176         spin_unlock(&block_group->space_info->lock);
10177
10178         memcpy(&key, &block_group->key, sizeof(key));
10179
10180         lock_chunks(root);
10181         if (!list_empty(&em->list)) {
10182                 /* We're in the transaction->pending_chunks list. */
10183                 free_extent_map(em);
10184         }
10185         spin_lock(&block_group->lock);
10186         block_group->removed = 1;
10187         /*
10188          * At this point trimming can't start on this block group, because we
10189          * removed the block group from the tree fs_info->block_group_cache_tree,
10190          * so no one can find it anymore, and even if someone already got this
10191          * block group before we removed it from the rbtree, they have already
10192          * incremented block_group->trimming - if they didn't, they won't find
10193          * any free space entries because we already removed them all when we
10194          * called btrfs_remove_free_space_cache().
10195          *
10196          * And we must not remove the extent map from the fs_info->mapping_tree
10197          * to prevent the same logical address range and physical device space
10198          * ranges from being reused for a new block group. This is because our
10199          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10200          * completely transactionless, so while it is trimming a range the
10201          * currently running transaction might finish and a new one start,
10202          * allowing for new block groups to be created that can reuse the same
10203          * physical device locations unless we take this special care.
10204          *
10205          * There may also be an implicit trim operation if the file system
10206          * is mounted with -odiscard. The same protections must remain
10207          * in place until the extents have been discarded completely when
10208          * the transaction commit has completed.
10209          */
10210         remove_em = (atomic_read(&block_group->trimming) == 0);
10211         /*
10212          * Make sure a trimmer task always sees the em in the pinned_chunks list
10213          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10214          * before checking block_group->removed).
10215          */
10216         if (!remove_em) {
10217                 /*
10218                  * Our em might be in trans->transaction->pending_chunks which
10219                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10220                  * and so is the fs_info->pinned_chunks list.
10221                  *
10222                  * So at this point we must be holding the chunk_mutex to avoid
10223                  * any races with chunk allocation (more specifically at
10224                  * volumes.c:contains_pending_extent()), to ensure it always
10225                  * sees the em, either in the pending_chunks list or in the
10226                  * pinned_chunks list.
10227                  */
10228                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10229         }
10230         spin_unlock(&block_group->lock);
10231
10232         if (remove_em) {
10233                 struct extent_map_tree *em_tree;
10234
10235                 em_tree = &root->fs_info->mapping_tree.map_tree;
10236                 write_lock(&em_tree->lock);
10237                 /*
10238                  * The em might be in the pending_chunks list, so make sure the
10239                  * chunk mutex is locked, since remove_extent_mapping() will
10240                  * delete us from that list.
10241                  */
10242                 remove_extent_mapping(em_tree, em);
10243                 write_unlock(&em_tree->lock);
10244                 /* once for the tree */
10245                 free_extent_map(em);
10246         }
10247
10248         unlock_chunks(root);
10249
10250         btrfs_put_block_group(block_group);
10251         btrfs_put_block_group(block_group);
10252
10253         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10254         if (ret > 0)
10255                 ret = -EIO;
10256         if (ret < 0)
10257                 goto out;
10258
10259         ret = btrfs_del_item(trans, root, path);
10260 out:
10261         btrfs_free_path(path);
10262         return ret;
10263 }
10264
10265 /*
10266  * Process the unused_bgs list and remove any that don't have any allocated
10267  * space inside of them.
10268  */
10269 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10270 {
10271         struct btrfs_block_group_cache *block_group;
10272         struct btrfs_space_info *space_info;
10273         struct btrfs_root *root = fs_info->extent_root;
10274         struct btrfs_trans_handle *trans;
10275         int ret = 0;
10276
10277         if (!fs_info->open)
10278                 return;
10279
10280         spin_lock(&fs_info->unused_bgs_lock);
10281         while (!list_empty(&fs_info->unused_bgs)) {
10282                 u64 start, end;
10283                 int trimming;
10284
10285                 block_group = list_first_entry(&fs_info->unused_bgs,
10286                                                struct btrfs_block_group_cache,
10287                                                bg_list);
10288                 space_info = block_group->space_info;
10289                 list_del_init(&block_group->bg_list);
10290                 if (ret || btrfs_mixed_space_info(space_info)) {
10291                         btrfs_put_block_group(block_group);
10292                         continue;
10293                 }
10294                 spin_unlock(&fs_info->unused_bgs_lock);
10295
10296                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10297
10298                 /* Don't want to race with allocators so take the groups_sem */
10299                 down_write(&space_info->groups_sem);
10300                 spin_lock(&block_group->lock);
10301                 if (block_group->reserved ||
10302                     btrfs_block_group_used(&block_group->item) ||
10303                     block_group->ro) {
10304                         /*
10305                          * We want to bail if we made new allocations or have
10306                          * outstanding allocations in this block group.  We do
10307                          * the ro check in case balance is currently acting on
10308                          * this block group.
10309                          */
10310                         spin_unlock(&block_group->lock);
10311                         up_write(&space_info->groups_sem);
10312                         goto next;
10313                 }
10314                 spin_unlock(&block_group->lock);
10315
10316                 /* We don't want to force the issue, only flip if it's ok. */
10317                 ret = inc_block_group_ro(block_group, 0);
10318                 up_write(&space_info->groups_sem);
10319                 if (ret < 0) {
10320                         ret = 0;
10321                         goto next;
10322                 }
10323
10324                 /*
10325                  * Want to do this before we do anything else so we can recover
10326                  * properly if we fail to join the transaction.
10327                  */
10328                 /* 1 for btrfs_orphan_reserve_metadata() */
10329                 trans = btrfs_start_transaction(root, 1);
10330                 if (IS_ERR(trans)) {
10331                         btrfs_dec_block_group_ro(root, block_group);
10332                         ret = PTR_ERR(trans);
10333                         goto next;
10334                 }
10335
10336                 /*
10337                  * We could have pending pinned extents for this block group,
10338                  * just delete them, we don't care about them anymore.
10339                  */
10340                 start = block_group->key.objectid;
10341                 end = start + block_group->key.offset - 1;
10342                 /*
10343                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10344                  * btrfs_finish_extent_commit(). If we are at transaction N,
10345                  * another task might be running finish_extent_commit() for the
10346                  * previous transaction N - 1, and have seen a range belonging
10347                  * to the block group in freed_extents[] before we were able to
10348                  * clear the whole block group range from freed_extents[]. This
10349                  * means that task can look up the block group after we
10350                  * unpinned it from freed_extents[] and removed it, leading to
10351                  * a BUG_ON() at btrfs_unpin_extent_range().
10352                  */
10353                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10354                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10355                                   EXTENT_DIRTY, GFP_NOFS);
10356                 if (ret) {
10357                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10358                         btrfs_dec_block_group_ro(root, block_group);
10359                         goto end_trans;
10360                 }
10361                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10362                                   EXTENT_DIRTY, GFP_NOFS);
10363                 if (ret) {
10364                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10365                         btrfs_dec_block_group_ro(root, block_group);
10366                         goto end_trans;
10367                 }
10368                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10369
10370                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10371                 spin_lock(&space_info->lock);
10372                 spin_lock(&block_group->lock);
10373
10374                 space_info->bytes_pinned -= block_group->pinned;
10375                 space_info->bytes_readonly += block_group->pinned;
10376                 percpu_counter_add(&space_info->total_bytes_pinned,
10377                                    -block_group->pinned);
10378                 block_group->pinned = 0;
10379
10380                 spin_unlock(&block_group->lock);
10381                 spin_unlock(&space_info->lock);
10382
10383                 /* DISCARD can flip during remount */
10384                 trimming = btrfs_test_opt(root, DISCARD);
10385
10386                 /* Implicit trim during transaction commit. */
10387                 if (trimming)
10388                         btrfs_get_block_group_trimming(block_group);
10389
10390                 /*
10391                  * btrfs_remove_chunk() will abort the transaction if things go
10392                  * horribly wrong.
10393                  */
10394                 ret = btrfs_remove_chunk(trans, root,
10395                                          block_group->key.objectid);
10396
10397                 if (ret) {
10398                         if (trimming)
10399                                 btrfs_put_block_group_trimming(block_group);
10400                         goto end_trans;
10401                 }
10402
10403                 /*
10404                  * If we're not mounted with -odiscard, we can just forget
10405                  * about this block group. Otherwise we'll need to wait
10406                  * until transaction commit to do the actual discard.
10407                  */
10408                 if (trimming) {
10409                         WARN_ON(!list_empty(&block_group->bg_list));
10410                         spin_lock(&trans->transaction->deleted_bgs_lock);
10411                         list_move(&block_group->bg_list,
10412                                   &trans->transaction->deleted_bgs);
10413                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10414                         btrfs_get_block_group(block_group);
10415                 }
10416 end_trans:
10417                 btrfs_end_transaction(trans, root);
10418 next:
10419                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10420                 btrfs_put_block_group(block_group);
10421                 spin_lock(&fs_info->unused_bgs_lock);
10422         }
10423         spin_unlock(&fs_info->unused_bgs_lock);
10424 }
10425
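/*
 * Create the initial space_info entries for a new filesystem: system plus
 * either separate metadata and data entries, or a single mixed entry when
 * the MIXED_GROUPS incompat feature is set.
 */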
10426 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10427 {
10428         struct btrfs_space_info *space_info;
10429         struct btrfs_super_block *disk_super;
10430         u64 features;
10431         u64 flags;
10432         int mixed = 0;
10433         int ret;
10434
10435         disk_super = fs_info->super_copy;
10436         if (!btrfs_super_root(disk_super))
10437                 return 1;
10438
10439         features = btrfs_super_incompat_flags(disk_super);
10440         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10441                 mixed = 1;
10442
10443         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10444         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10445         if (ret)
10446                 goto out;
10447
10448         if (mixed) {
10449                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10450                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10451         } else {
10452                 flags = BTRFS_BLOCK_GROUP_METADATA;
10453                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10454                 if (ret)
10455                         goto out;
10456
10457                 flags = BTRFS_BLOCK_GROUP_DATA;
10458                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10459         }
10460 out:
10461         return ret;
10462 }
10463
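/*
 * Unpin the given range without returning the space to the free space
 * caches; used on error/cleanup paths where the caches are going away.
 */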
10464 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10465 {
10466         return unpin_extent_range(root, start, end, false);
10467 }
10468
10469 /*
10470  * It used to be that old block groups would be left around forever.
10471  * Iterating over them would be enough to trim unused space.  Since we
10472  * now automatically remove them, we also need to iterate over unallocated
10473  * space.
10474  *
10475  * We don't want a transaction for this since the discard may take a
10476  * substantial amount of time.  We don't require that a transaction be
10477  * running, but we do need to take a running transaction into account
10478  * to ensure that we're not discarding chunks that were released in
10479  * the current transaction.
10480  *
10481  * Holding the chunks lock will prevent other threads from allocating
10482  * or releasing chunks, but it won't prevent a running transaction
10483  * from committing and releasing the memory that the pending chunks
10484  * list head uses.  For that, we need to take a reference to the
10485  * transaction.
10486  */
static int btrfs_trim_free_extents(struct btrfs_device *device,
                                   u64 minlen, u64 *trimmed)
{
        u64 start = 0, len = 0;
        int ret;

        *trimmed = 0;

        /* Not writeable = nothing to do. */
        if (!device->writeable)
                return 0;

        /* No free space = nothing to do. */
        if (device->total_bytes <= device->bytes_used)
                return 0;

        ret = 0;

        while (1) {
                struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
                struct btrfs_transaction *trans;
                u64 bytes;

                ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
                if (ret)
                        return ret;

                down_read(&fs_info->commit_root_sem);

                /*
                 * Take a reference on the running transaction, if any, so
                 * that its memory stays alive while we search (see the
                 * comment above this function).
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        atomic_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                /* Find the next free slot of at least minlen bytes. */
                ret = find_free_dev_extent_start(trans, device, minlen, start,
                                                 &start, &len);
                if (trans)
                        btrfs_put_transaction(trans);

                if (ret) {
                        up_read(&fs_info->commit_root_sem);
                        mutex_unlock(&fs_info->chunk_mutex);
                        /* -ENOSPC just means there is nothing left to trim. */
                        if (ret == -ENOSPC)
                                ret = 0;
                        break;
                }

                ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
                up_read(&fs_info->commit_root_sem);
                mutex_unlock(&fs_info->chunk_mutex);

                if (ret)
                        break;

                start += len;
                *trimmed += bytes;

                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                cond_resched();
        }

        return ret;
}
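
/*
 * Illustrative sketch only (not used by this file): the transaction
 * pinning pattern described above, pulled out as a hypothetical helper.
 * The helper name is made up for illustration; the calls it makes are
 * the same ones btrfs_trim_free_extents() uses.  A non-NULL return must
 * eventually be released with btrfs_put_transaction().
 */
static inline struct btrfs_transaction *
example_pin_running_transaction(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *trans;

        spin_lock(&fs_info->trans_lock);
        trans = fs_info->running_transaction;
        if (trans)
                atomic_inc(&trans->use_count);  /* keep its memory alive */
        spin_unlock(&fs_info->trans_lock);

        return trans;   /* may be NULL if no transaction is running */
}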

int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_device *device;
        struct list_head *devices;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * Try to trim all of the FS space.  The first block group may not
         * start at offset zero, so when the range covers the whole FS look
         * up the first block group at or after range->start instead of
         * requiring an exact match.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                /* Clamp the requested range to this block group. */
                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                                cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, 0);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                                ret = wait_block_group_cache_done(cache);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info->tree_root, cache);
        }

        /* Also trim the free (unallocated) space on each writeable device. */
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        devices = &root->fs_info->fs_devices->alloc_list;
        list_for_each_entry(device, devices, dev_alloc_list) {
                ret = btrfs_trim_free_extents(device, range->minlen,
                                              &group_trimmed);
                if (ret)
                        break;

                trimmed += group_trimmed;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        /* Report back how many bytes we actually discarded. */
        range->len = trimmed;
        return ret;
}
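
/*
 * Illustrative sketch only (not part of this file): how a caller such
 * as the FITRIM ioctl path might drive btrfs_trim_fs().  The helper
 * name is made up; struct fstrim_range is the uapi structure from
 * <linux/fs.h>.  Note that btrfs_trim_fs() rewrites range->len with
 * the number of bytes actually trimmed.
 */
static inline int example_trim_whole_fs(struct btrfs_root *root)
{
        struct fstrim_range range = {
                .start = 0,
                /* len == total_bytes selects the whole-FS lookup above */
                .len = btrfs_super_total_bytes(root->fs_info->super_copy),
                .minlen = 0,
        };
        int ret;

        ret = btrfs_trim_fs(root, &range);
        if (!ret)
                pr_info("btrfs: trimmed %llu bytes\n", range.len);
        return ret;
}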

/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from
 * writing data into the page cache through nocow before the subvolume
 * is snapshotted and flushing it to disk only after the snapshot is
 * created, and to prevent operations that would make the snapshot
 * inconsistent while it is being created (writes followed by expanding
 * truncates, for example).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
        percpu_counter_dec(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&root->subv_writers->wait))
                wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
        if (atomic_read(&root->will_be_snapshoted))
                return 0;

        percpu_counter_inc(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we check for snapshot creation.
         */
        smp_mb();
        if (atomic_read(&root->will_be_snapshoted)) {
                btrfs_end_write_no_snapshoting(root);
                return 0;
        }
        return 1;
}
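
/*
 * Illustrative caller pattern only (a sketch; the helper name is made
 * up): a nocow writer brackets its work with the pair above and backs
 * off when a snapshot is pending, typically falling back to cow.
 */
static inline int example_nocow_write(struct btrfs_root *root)
{
        if (!btrfs_start_write_no_snapshoting(root))
                return -EAGAIN; /* snapshot pending, caller should cow */

        /* ... write data through nocow here ... */

        btrfs_end_write_no_snapshoting(root);
        return 0;
}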