btrfs: cleanup, stop casting for extent_map->lookup everywhere
[cascardo/linux.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43  * control flags for do_chunk_alloc's force field
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try and allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one
54  *
55  */
56 enum {
57         CHUNK_ALLOC_NO_FORCE = 0,
58         CHUNK_ALLOC_LIMITED = 1,
59         CHUNK_ALLOC_FORCE = 2,
60 };
61
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72         RESERVE_FREE = 0,
73         RESERVE_ALLOC = 1,
74         RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
76
77 static int update_block_group(struct btrfs_trans_handle *trans,
78                               struct btrfs_root *root, u64 bytenr,
79                               u64 num_bytes, int alloc);
80 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81                                 struct btrfs_root *root,
82                                 struct btrfs_delayed_ref_node *node, u64 parent,
83                                 u64 root_objectid, u64 owner_objectid,
84                                 u64 owner_offset, int refs_to_drop,
85                                 struct btrfs_delayed_extent_op *extra_op);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87                                     struct extent_buffer *leaf,
88                                     struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90                                       struct btrfs_root *root,
91                                       u64 parent, u64 root_objectid,
92                                       u64 flags, u64 owner, u64 offset,
93                                       struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95                                      struct btrfs_root *root,
96                                      u64 parent, u64 root_objectid,
97                                      u64 flags, struct btrfs_disk_key *key,
98                                      int level, struct btrfs_key *ins);
99 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
100                           struct btrfs_root *extent_root, u64 flags,
101                           int force);
102 static int find_next_key(struct btrfs_path *path, int level,
103                          struct btrfs_key *key);
104 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
105                             int dump_block_groups);
106 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
107                                        u64 num_bytes, int reserve,
108                                        int delalloc);
109 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
110                                u64 num_bytes);
111 int btrfs_pin_extent(struct btrfs_root *root,
112                      u64 bytenr, u64 num_bytes, int reserved);
113
114 static noinline int
115 block_group_cache_done(struct btrfs_block_group_cache *cache)
116 {
117         smp_mb();
118         return cache->cached == BTRFS_CACHE_FINISHED ||
119                 cache->cached == BTRFS_CACHE_ERROR;
120 }
121
122 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
123 {
124         return (cache->flags & bits) == bits;
125 }
126
127 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
128 {
129         atomic_inc(&cache->count);
130 }
131
132 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
133 {
134         if (atomic_dec_and_test(&cache->count)) {
135                 WARN_ON(cache->pinned > 0);
136                 WARN_ON(cache->reserved > 0);
137                 kfree(cache->free_space_ctl);
138                 kfree(cache);
139         }
140 }
141
142 /*
143  * this adds the block group to the fs_info rb tree for the block group
144  * cache
145  */
146 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
147                                 struct btrfs_block_group_cache *block_group)
148 {
149         struct rb_node **p;
150         struct rb_node *parent = NULL;
151         struct btrfs_block_group_cache *cache;
152
153         spin_lock(&info->block_group_cache_lock);
154         p = &info->block_group_cache_tree.rb_node;
155
156         while (*p) {
157                 parent = *p;
158                 cache = rb_entry(parent, struct btrfs_block_group_cache,
159                                  cache_node);
160                 if (block_group->key.objectid < cache->key.objectid) {
161                         p = &(*p)->rb_left;
162                 } else if (block_group->key.objectid > cache->key.objectid) {
163                         p = &(*p)->rb_right;
164                 } else {
165                         spin_unlock(&info->block_group_cache_lock);
166                         return -EEXIST;
167                 }
168         }
169
170         rb_link_node(&block_group->cache_node, parent, p);
171         rb_insert_color(&block_group->cache_node,
172                         &info->block_group_cache_tree);
173
174         if (info->first_logical_byte > block_group->key.objectid)
175                 info->first_logical_byte = block_group->key.objectid;
176
177         spin_unlock(&info->block_group_cache_lock);
178
179         return 0;
180 }
181
182 /*
183  * This will return the block group at or after bytenr if contains is 0, else
184  * it will return the block group that contains the bytenr
185  */
186 static struct btrfs_block_group_cache *
187 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
188                               int contains)
189 {
190         struct btrfs_block_group_cache *cache, *ret = NULL;
191         struct rb_node *n;
192         u64 end, start;
193
194         spin_lock(&info->block_group_cache_lock);
195         n = info->block_group_cache_tree.rb_node;
196
197         while (n) {
198                 cache = rb_entry(n, struct btrfs_block_group_cache,
199                                  cache_node);
200                 end = cache->key.objectid + cache->key.offset - 1;
201                 start = cache->key.objectid;
202
203                 if (bytenr < start) {
204                         if (!contains && (!ret || start < ret->key.objectid))
205                                 ret = cache;
206                         n = n->rb_left;
207                 } else if (bytenr > start) {
208                         if (contains && bytenr <= end) {
209                                 ret = cache;
210                                 break;
211                         }
212                         n = n->rb_right;
213                 } else {
214                         ret = cache;
215                         break;
216                 }
217         }
218         if (ret) {
219                 btrfs_get_block_group(ret);
220                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
221                         info->first_logical_byte = ret->key.objectid;
222         }
223         spin_unlock(&info->block_group_cache_lock);
224
225         return ret;
226 }
227
228 static int add_excluded_extent(struct btrfs_root *root,
229                                u64 start, u64 num_bytes)
230 {
231         u64 end = start + num_bytes - 1;
232         set_extent_bits(&root->fs_info->freed_extents[0],
233                         start, end, EXTENT_UPTODATE, GFP_NOFS);
234         set_extent_bits(&root->fs_info->freed_extents[1],
235                         start, end, EXTENT_UPTODATE, GFP_NOFS);
236         return 0;
237 }
238
239 static void free_excluded_extents(struct btrfs_root *root,
240                                   struct btrfs_block_group_cache *cache)
241 {
242         u64 start, end;
243
244         start = cache->key.objectid;
245         end = start + cache->key.offset - 1;
246
247         clear_extent_bits(&root->fs_info->freed_extents[0],
248                           start, end, EXTENT_UPTODATE, GFP_NOFS);
249         clear_extent_bits(&root->fs_info->freed_extents[1],
250                           start, end, EXTENT_UPTODATE, GFP_NOFS);
251 }
252
253 static int exclude_super_stripes(struct btrfs_root *root,
254                                  struct btrfs_block_group_cache *cache)
255 {
256         u64 bytenr;
257         u64 *logical;
258         int stripe_len;
259         int i, nr, ret;
260
261         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
262                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
263                 cache->bytes_super += stripe_len;
264                 ret = add_excluded_extent(root, cache->key.objectid,
265                                           stripe_len);
266                 if (ret)
267                         return ret;
268         }
269
270         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
271                 bytenr = btrfs_sb_offset(i);
272                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
273                                        cache->key.objectid, bytenr,
274                                        0, &logical, &nr, &stripe_len);
275                 if (ret)
276                         return ret;
277
278                 while (nr--) {
279                         u64 start, len;
280
281                         if (logical[nr] > cache->key.objectid +
282                             cache->key.offset)
283                                 continue;
284
285                         if (logical[nr] + stripe_len <= cache->key.objectid)
286                                 continue;
287
288                         start = logical[nr];
289                         if (start < cache->key.objectid) {
290                                 start = cache->key.objectid;
291                                 len = (logical[nr] + stripe_len) - start;
292                         } else {
293                                 len = min_t(u64, stripe_len,
294                                             cache->key.objectid +
295                                             cache->key.offset - start);
296                         }
297
298                         cache->bytes_super += len;
299                         ret = add_excluded_extent(root, start, len);
300                         if (ret) {
301                                 kfree(logical);
302                                 return ret;
303                         }
304                 }
305
306                 kfree(logical);
307         }
308         return 0;
309 }
310
311 static struct btrfs_caching_control *
312 get_caching_control(struct btrfs_block_group_cache *cache)
313 {
314         struct btrfs_caching_control *ctl;
315
316         spin_lock(&cache->lock);
317         if (!cache->caching_ctl) {
318                 spin_unlock(&cache->lock);
319                 return NULL;
320         }
321
322         ctl = cache->caching_ctl;
323         atomic_inc(&ctl->count);
324         spin_unlock(&cache->lock);
325         return ctl;
326 }
327
328 static void put_caching_control(struct btrfs_caching_control *ctl)
329 {
330         if (atomic_dec_and_test(&ctl->count))
331                 kfree(ctl);
332 }
333
334 #ifdef CONFIG_BTRFS_DEBUG
335 static void fragment_free_space(struct btrfs_root *root,
336                                 struct btrfs_block_group_cache *block_group)
337 {
338         u64 start = block_group->key.objectid;
339         u64 len = block_group->key.offset;
340         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
341                 root->nodesize : root->sectorsize;
342         u64 step = chunk << 1;
343
344         while (len > chunk) {
345                 btrfs_remove_free_space(block_group, start, chunk);
346                 start += step;
347                 if (len < step)
348                         len = 0;
349                 else
350                         len -= step;
351         }
352 }
353 #endif
354
355 /*
356  * this is only called by cache_block_group, since we could have freed extents
357  * we need to check the pinned_extents for any extents that can't be used yet
358  * since their free space will be released as soon as the transaction commits.
359  */
360 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
361                               struct btrfs_fs_info *info, u64 start, u64 end)
362 {
363         u64 extent_start, extent_end, size, total_added = 0;
364         int ret;
365
366         while (start < end) {
367                 ret = find_first_extent_bit(info->pinned_extents, start,
368                                             &extent_start, &extent_end,
369                                             EXTENT_DIRTY | EXTENT_UPTODATE,
370                                             NULL);
371                 if (ret)
372                         break;
373
374                 if (extent_start <= start) {
375                         start = extent_end + 1;
376                 } else if (extent_start > start && extent_start < end) {
377                         size = extent_start - start;
378                         total_added += size;
379                         ret = btrfs_add_free_space(block_group, start,
380                                                    size);
381                         BUG_ON(ret); /* -ENOMEM or logic error */
382                         start = extent_end + 1;
383                 } else {
384                         break;
385                 }
386         }
387
388         if (start < end) {
389                 size = end - start;
390                 total_added += size;
391                 ret = btrfs_add_free_space(block_group, start, size);
392                 BUG_ON(ret); /* -ENOMEM or logic error */
393         }
394
395         return total_added;
396 }
397
398 static noinline void caching_thread(struct btrfs_work *work)
399 {
400         struct btrfs_block_group_cache *block_group;
401         struct btrfs_fs_info *fs_info;
402         struct btrfs_caching_control *caching_ctl;
403         struct btrfs_root *extent_root;
404         struct btrfs_path *path;
405         struct extent_buffer *leaf;
406         struct btrfs_key key;
407         u64 total_found = 0;
408         u64 last = 0;
409         u32 nritems;
410         int ret = -ENOMEM;
411         bool wakeup = true;
412
413         caching_ctl = container_of(work, struct btrfs_caching_control, work);
414         block_group = caching_ctl->block_group;
415         fs_info = block_group->fs_info;
416         extent_root = fs_info->extent_root;
417
418         path = btrfs_alloc_path();
419         if (!path)
420                 goto out;
421
422         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
423
424 #ifdef CONFIG_BTRFS_DEBUG
425         /*
426          * If we're fragmenting we don't want to make anybody think we can
427          * allocate from this block group until we've had a chance to fragment
428          * the free space.
429          */
430         if (btrfs_should_fragment_free_space(extent_root, block_group))
431                 wakeup = false;
432 #endif
433         /*
434          * We don't want to deadlock with somebody trying to allocate a new
435          * extent for the extent root while also trying to search the extent
436          * root to add free space.  So we skip locking and search the commit
437          * root, since its read-only
438          */
439         path->skip_locking = 1;
440         path->search_commit_root = 1;
441         path->reada = 1;
442
443         key.objectid = last;
444         key.offset = 0;
445         key.type = BTRFS_EXTENT_ITEM_KEY;
446 again:
447         mutex_lock(&caching_ctl->mutex);
448         /* need to make sure the commit_root doesn't disappear */
449         down_read(&fs_info->commit_root_sem);
450
451 next:
452         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
453         if (ret < 0)
454                 goto err;
455
456         leaf = path->nodes[0];
457         nritems = btrfs_header_nritems(leaf);
458
459         while (1) {
460                 if (btrfs_fs_closing(fs_info) > 1) {
461                         last = (u64)-1;
462                         break;
463                 }
464
465                 if (path->slots[0] < nritems) {
466                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
467                 } else {
468                         ret = find_next_key(path, 0, &key);
469                         if (ret)
470                                 break;
471
472                         if (need_resched() ||
473                             rwsem_is_contended(&fs_info->commit_root_sem)) {
474                                 if (wakeup)
475                                         caching_ctl->progress = last;
476                                 btrfs_release_path(path);
477                                 up_read(&fs_info->commit_root_sem);
478                                 mutex_unlock(&caching_ctl->mutex);
479                                 cond_resched();
480                                 goto again;
481                         }
482
483                         ret = btrfs_next_leaf(extent_root, path);
484                         if (ret < 0)
485                                 goto err;
486                         if (ret)
487                                 break;
488                         leaf = path->nodes[0];
489                         nritems = btrfs_header_nritems(leaf);
490                         continue;
491                 }
492
493                 if (key.objectid < last) {
494                         key.objectid = last;
495                         key.offset = 0;
496                         key.type = BTRFS_EXTENT_ITEM_KEY;
497
498                         if (wakeup)
499                                 caching_ctl->progress = last;
500                         btrfs_release_path(path);
501                         goto next;
502                 }
503
504                 if (key.objectid < block_group->key.objectid) {
505                         path->slots[0]++;
506                         continue;
507                 }
508
509                 if (key.objectid >= block_group->key.objectid +
510                     block_group->key.offset)
511                         break;
512
513                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
514                     key.type == BTRFS_METADATA_ITEM_KEY) {
515                         total_found += add_new_free_space(block_group,
516                                                           fs_info, last,
517                                                           key.objectid);
518                         if (key.type == BTRFS_METADATA_ITEM_KEY)
519                                 last = key.objectid +
520                                         fs_info->tree_root->nodesize;
521                         else
522                                 last = key.objectid + key.offset;
523
524                         if (total_found > SZ_2M) {
525                                 total_found = 0;
526                                 if (wakeup)
527                                         wake_up(&caching_ctl->wait);
528                         }
529                 }
530                 path->slots[0]++;
531         }
532         ret = 0;
533
534         total_found += add_new_free_space(block_group, fs_info, last,
535                                           block_group->key.objectid +
536                                           block_group->key.offset);
537         spin_lock(&block_group->lock);
538         block_group->caching_ctl = NULL;
539         block_group->cached = BTRFS_CACHE_FINISHED;
540         spin_unlock(&block_group->lock);
541
542 #ifdef CONFIG_BTRFS_DEBUG
543         if (btrfs_should_fragment_free_space(extent_root, block_group)) {
544                 u64 bytes_used;
545
546                 spin_lock(&block_group->space_info->lock);
547                 spin_lock(&block_group->lock);
548                 bytes_used = block_group->key.offset -
549                         btrfs_block_group_used(&block_group->item);
550                 block_group->space_info->bytes_used += bytes_used >> 1;
551                 spin_unlock(&block_group->lock);
552                 spin_unlock(&block_group->space_info->lock);
553                 fragment_free_space(extent_root, block_group);
554         }
555 #endif
556
557         caching_ctl->progress = (u64)-1;
558 err:
559         btrfs_free_path(path);
560         up_read(&fs_info->commit_root_sem);
561
562         free_excluded_extents(extent_root, block_group);
563
564         mutex_unlock(&caching_ctl->mutex);
565 out:
566         if (ret) {
567                 spin_lock(&block_group->lock);
568                 block_group->caching_ctl = NULL;
569                 block_group->cached = BTRFS_CACHE_ERROR;
570                 spin_unlock(&block_group->lock);
571         }
572         wake_up(&caching_ctl->wait);
573
574         put_caching_control(caching_ctl);
575         btrfs_put_block_group(block_group);
576 }
577
578 static int cache_block_group(struct btrfs_block_group_cache *cache,
579                              int load_cache_only)
580 {
581         DEFINE_WAIT(wait);
582         struct btrfs_fs_info *fs_info = cache->fs_info;
583         struct btrfs_caching_control *caching_ctl;
584         int ret = 0;
585
586         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
587         if (!caching_ctl)
588                 return -ENOMEM;
589
590         INIT_LIST_HEAD(&caching_ctl->list);
591         mutex_init(&caching_ctl->mutex);
592         init_waitqueue_head(&caching_ctl->wait);
593         caching_ctl->block_group = cache;
594         caching_ctl->progress = cache->key.objectid;
595         atomic_set(&caching_ctl->count, 1);
596         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
597                         caching_thread, NULL, NULL);
598
599         spin_lock(&cache->lock);
600         /*
601          * This should be a rare occasion, but this could happen I think in the
602          * case where one thread starts to load the space cache info, and then
603          * some other thread starts a transaction commit which tries to do an
604          * allocation while the other thread is still loading the space cache
605          * info.  The previous loop should have kept us from choosing this block
606          * group, but if we've moved to the state where we will wait on caching
607          * block groups we need to first check if we're doing a fast load here,
608          * so we can wait for it to finish, otherwise we could end up allocating
609          * from a block group who's cache gets evicted for one reason or
610          * another.
611          */
612         while (cache->cached == BTRFS_CACHE_FAST) {
613                 struct btrfs_caching_control *ctl;
614
615                 ctl = cache->caching_ctl;
616                 atomic_inc(&ctl->count);
617                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
618                 spin_unlock(&cache->lock);
619
620                 schedule();
621
622                 finish_wait(&ctl->wait, &wait);
623                 put_caching_control(ctl);
624                 spin_lock(&cache->lock);
625         }
626
627         if (cache->cached != BTRFS_CACHE_NO) {
628                 spin_unlock(&cache->lock);
629                 kfree(caching_ctl);
630                 return 0;
631         }
632         WARN_ON(cache->caching_ctl);
633         cache->caching_ctl = caching_ctl;
634         cache->cached = BTRFS_CACHE_FAST;
635         spin_unlock(&cache->lock);
636
637         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
638                 mutex_lock(&caching_ctl->mutex);
639                 ret = load_free_space_cache(fs_info, cache);
640
641                 spin_lock(&cache->lock);
642                 if (ret == 1) {
643                         cache->caching_ctl = NULL;
644                         cache->cached = BTRFS_CACHE_FINISHED;
645                         cache->last_byte_to_unpin = (u64)-1;
646                         caching_ctl->progress = (u64)-1;
647                 } else {
648                         if (load_cache_only) {
649                                 cache->caching_ctl = NULL;
650                                 cache->cached = BTRFS_CACHE_NO;
651                         } else {
652                                 cache->cached = BTRFS_CACHE_STARTED;
653                                 cache->has_caching_ctl = 1;
654                         }
655                 }
656                 spin_unlock(&cache->lock);
657 #ifdef CONFIG_BTRFS_DEBUG
658                 if (ret == 1 &&
659                     btrfs_should_fragment_free_space(fs_info->extent_root,
660                                                      cache)) {
661                         u64 bytes_used;
662
663                         spin_lock(&cache->space_info->lock);
664                         spin_lock(&cache->lock);
665                         bytes_used = cache->key.offset -
666                                 btrfs_block_group_used(&cache->item);
667                         cache->space_info->bytes_used += bytes_used >> 1;
668                         spin_unlock(&cache->lock);
669                         spin_unlock(&cache->space_info->lock);
670                         fragment_free_space(fs_info->extent_root, cache);
671                 }
672 #endif
673                 mutex_unlock(&caching_ctl->mutex);
674
675                 wake_up(&caching_ctl->wait);
676                 if (ret == 1) {
677                         put_caching_control(caching_ctl);
678                         free_excluded_extents(fs_info->extent_root, cache);
679                         return 0;
680                 }
681         } else {
682                 /*
683                  * We are not going to do the fast caching, set cached to the
684                  * appropriate value and wakeup any waiters.
685                  */
686                 spin_lock(&cache->lock);
687                 if (load_cache_only) {
688                         cache->caching_ctl = NULL;
689                         cache->cached = BTRFS_CACHE_NO;
690                 } else {
691                         cache->cached = BTRFS_CACHE_STARTED;
692                         cache->has_caching_ctl = 1;
693                 }
694                 spin_unlock(&cache->lock);
695                 wake_up(&caching_ctl->wait);
696         }
697
698         if (load_cache_only) {
699                 put_caching_control(caching_ctl);
700                 return 0;
701         }
702
703         down_write(&fs_info->commit_root_sem);
704         atomic_inc(&caching_ctl->count);
705         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
706         up_write(&fs_info->commit_root_sem);
707
708         btrfs_get_block_group(cache);
709
710         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
711
712         return ret;
713 }
714
715 /*
716  * return the block group that starts at or after bytenr
717  */
718 static struct btrfs_block_group_cache *
719 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
720 {
721         struct btrfs_block_group_cache *cache;
722
723         cache = block_group_cache_tree_search(info, bytenr, 0);
724
725         return cache;
726 }
727
728 /*
729  * return the block group that contains the given bytenr
730  */
731 struct btrfs_block_group_cache *btrfs_lookup_block_group(
732                                                  struct btrfs_fs_info *info,
733                                                  u64 bytenr)
734 {
735         struct btrfs_block_group_cache *cache;
736
737         cache = block_group_cache_tree_search(info, bytenr, 1);
738
739         return cache;
740 }
741
742 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
743                                                   u64 flags)
744 {
745         struct list_head *head = &info->space_info;
746         struct btrfs_space_info *found;
747
748         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
749
750         rcu_read_lock();
751         list_for_each_entry_rcu(found, head, list) {
752                 if (found->flags & flags) {
753                         rcu_read_unlock();
754                         return found;
755                 }
756         }
757         rcu_read_unlock();
758         return NULL;
759 }
760
761 /*
762  * after adding space to the filesystem, we need to clear the full flags
763  * on all the space infos.
764  */
765 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
766 {
767         struct list_head *head = &info->space_info;
768         struct btrfs_space_info *found;
769
770         rcu_read_lock();
771         list_for_each_entry_rcu(found, head, list)
772                 found->full = 0;
773         rcu_read_unlock();
774 }
775
776 /* simple helper to search for an existing data extent at a given offset */
777 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
778 {
779         int ret;
780         struct btrfs_key key;
781         struct btrfs_path *path;
782
783         path = btrfs_alloc_path();
784         if (!path)
785                 return -ENOMEM;
786
787         key.objectid = start;
788         key.offset = len;
789         key.type = BTRFS_EXTENT_ITEM_KEY;
790         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
791                                 0, 0);
792         btrfs_free_path(path);
793         return ret;
794 }
795
796 /*
797  * helper function to lookup reference count and flags of a tree block.
798  *
799  * the head node for delayed ref is used to store the sum of all the
800  * reference count modifications queued up in the rbtree. the head
801  * node may also store the extent flags to set. This way you can check
802  * to see what the reference count and extent flags would be if all of
803  * the delayed refs are not processed.
804  */
805 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
806                              struct btrfs_root *root, u64 bytenr,
807                              u64 offset, int metadata, u64 *refs, u64 *flags)
808 {
809         struct btrfs_delayed_ref_head *head;
810         struct btrfs_delayed_ref_root *delayed_refs;
811         struct btrfs_path *path;
812         struct btrfs_extent_item *ei;
813         struct extent_buffer *leaf;
814         struct btrfs_key key;
815         u32 item_size;
816         u64 num_refs;
817         u64 extent_flags;
818         int ret;
819
820         /*
821          * If we don't have skinny metadata, don't bother doing anything
822          * different
823          */
824         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
825                 offset = root->nodesize;
826                 metadata = 0;
827         }
828
829         path = btrfs_alloc_path();
830         if (!path)
831                 return -ENOMEM;
832
833         if (!trans) {
834                 path->skip_locking = 1;
835                 path->search_commit_root = 1;
836         }
837
838 search_again:
839         key.objectid = bytenr;
840         key.offset = offset;
841         if (metadata)
842                 key.type = BTRFS_METADATA_ITEM_KEY;
843         else
844                 key.type = BTRFS_EXTENT_ITEM_KEY;
845
846         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
847                                 &key, path, 0, 0);
848         if (ret < 0)
849                 goto out_free;
850
851         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
852                 if (path->slots[0]) {
853                         path->slots[0]--;
854                         btrfs_item_key_to_cpu(path->nodes[0], &key,
855                                               path->slots[0]);
856                         if (key.objectid == bytenr &&
857                             key.type == BTRFS_EXTENT_ITEM_KEY &&
858                             key.offset == root->nodesize)
859                                 ret = 0;
860                 }
861         }
862
863         if (ret == 0) {
864                 leaf = path->nodes[0];
865                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
866                 if (item_size >= sizeof(*ei)) {
867                         ei = btrfs_item_ptr(leaf, path->slots[0],
868                                             struct btrfs_extent_item);
869                         num_refs = btrfs_extent_refs(leaf, ei);
870                         extent_flags = btrfs_extent_flags(leaf, ei);
871                 } else {
872 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
873                         struct btrfs_extent_item_v0 *ei0;
874                         BUG_ON(item_size != sizeof(*ei0));
875                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
876                                              struct btrfs_extent_item_v0);
877                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
878                         /* FIXME: this isn't correct for data */
879                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
880 #else
881                         BUG();
882 #endif
883                 }
884                 BUG_ON(num_refs == 0);
885         } else {
886                 num_refs = 0;
887                 extent_flags = 0;
888                 ret = 0;
889         }
890
891         if (!trans)
892                 goto out;
893
894         delayed_refs = &trans->transaction->delayed_refs;
895         spin_lock(&delayed_refs->lock);
896         head = btrfs_find_delayed_ref_head(trans, bytenr);
897         if (head) {
898                 if (!mutex_trylock(&head->mutex)) {
899                         atomic_inc(&head->node.refs);
900                         spin_unlock(&delayed_refs->lock);
901
902                         btrfs_release_path(path);
903
904                         /*
905                          * Mutex was contended, block until it's released and try
906                          * again
907                          */
908                         mutex_lock(&head->mutex);
909                         mutex_unlock(&head->mutex);
910                         btrfs_put_delayed_ref(&head->node);
911                         goto search_again;
912                 }
913                 spin_lock(&head->lock);
914                 if (head->extent_op && head->extent_op->update_flags)
915                         extent_flags |= head->extent_op->flags_to_set;
916                 else
917                         BUG_ON(num_refs == 0);
918
919                 num_refs += head->node.ref_mod;
920                 spin_unlock(&head->lock);
921                 mutex_unlock(&head->mutex);
922         }
923         spin_unlock(&delayed_refs->lock);
924 out:
925         WARN_ON(num_refs == 0);
926         if (refs)
927                 *refs = num_refs;
928         if (flags)
929                 *flags = extent_flags;
930 out_free:
931         btrfs_free_path(path);
932         return ret;
933 }
934
935 /*
936  * Back reference rules.  Back refs have three main goals:
937  *
938  * 1) differentiate between all holders of references to an extent so that
939  *    when a reference is dropped we can make sure it was a valid reference
940  *    before freeing the extent.
941  *
942  * 2) Provide enough information to quickly find the holders of an extent
943  *    if we notice a given block is corrupted or bad.
944  *
945  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
946  *    maintenance.  This is actually the same as #2, but with a slightly
947  *    different use case.
948  *
949  * There are two kinds of back refs. The implicit back refs is optimized
950  * for pointers in non-shared tree blocks. For a given pointer in a block,
951  * back refs of this kind provide information about the block's owner tree
952  * and the pointer's key. These information allow us to find the block by
953  * b-tree searching. The full back refs is for pointers in tree blocks not
954  * referenced by their owner trees. The location of tree block is recorded
955  * in the back refs. Actually the full back refs is generic, and can be
956  * used in all cases the implicit back refs is used. The major shortcoming
957  * of the full back refs is its overhead. Every time a tree block gets
958  * COWed, we have to update back refs entry for all pointers in it.
959  *
960  * For a newly allocated tree block, we use implicit back refs for
961  * pointers in it. This means most tree related operations only involve
962  * implicit back refs. For a tree block created in old transaction, the
963  * only way to drop a reference to it is COW it. So we can detect the
964  * event that tree block loses its owner tree's reference and do the
965  * back refs conversion.
966  *
967  * When a tree block is COW'd through a tree, there are four cases:
968  *
969  * The reference count of the block is one and the tree is the block's
970  * owner tree. Nothing to do in this case.
971  *
972  * The reference count of the block is one and the tree is not the
973  * block's owner tree. In this case, full back refs is used for pointers
974  * in the block. Remove these full back refs, add implicit back refs for
975  * every pointers in the new block.
976  *
977  * The reference count of the block is greater than one and the tree is
978  * the block's owner tree. In this case, implicit back refs is used for
979  * pointers in the block. Add full back refs for every pointers in the
980  * block, increase lower level extents' reference counts. The original
981  * implicit back refs are entailed to the new block.
982  *
983  * The reference count of the block is greater than one and the tree is
984  * not the block's owner tree. Add implicit back refs for every pointer in
985  * the new block, increase lower level extents' reference count.
986  *
987  * Back Reference Key composing:
988  *
989  * The key objectid corresponds to the first byte in the extent,
990  * The key type is used to differentiate between types of back refs.
991  * There are different meanings of the key offset for different types
992  * of back refs.
993  *
994  * File extents can be referenced by:
995  *
996  * - multiple snapshots, subvolumes, or different generations in one subvol
997  * - different files inside a single subvolume
998  * - different offsets inside a file (bookend extents in file.c)
999  *
1000  * The extent ref structure for the implicit back refs has fields for:
1001  *
1002  * - Objectid of the subvolume root
1003  * - objectid of the file holding the reference
1004  * - original offset in the file
1005  * - how many bookend extents
1006  *
1007  * The key offset for the implicit back refs is hash of the first
1008  * three fields.
1009  *
1010  * The extent ref structure for the full back refs has field for:
1011  *
1012  * - number of pointers in the tree leaf
1013  *
1014  * The key offset for the implicit back refs is the first byte of
1015  * the tree leaf
1016  *
1017  * When a file extent is allocated, The implicit back refs is used.
1018  * the fields are filled in:
1019  *
1020  *     (root_key.objectid, inode objectid, offset in file, 1)
1021  *
1022  * When a file extent is removed file truncation, we find the
1023  * corresponding implicit back refs and check the following fields:
1024  *
1025  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1026  *
1027  * Btree extents can be referenced by:
1028  *
1029  * - Different subvolumes
1030  *
1031  * Both the implicit back refs and the full back refs for tree blocks
1032  * only consist of key. The key offset for the implicit back refs is
1033  * objectid of block's owner tree. The key offset for the full back refs
1034  * is the first byte of parent block.
1035  *
1036  * When implicit back refs is used, information about the lowest key and
1037  * level of the tree block are required. These information are stored in
1038  * tree block info structure.
1039  */
1040
1041 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1042 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1043                                   struct btrfs_root *root,
1044                                   struct btrfs_path *path,
1045                                   u64 owner, u32 extra_size)
1046 {
1047         struct btrfs_extent_item *item;
1048         struct btrfs_extent_item_v0 *ei0;
1049         struct btrfs_extent_ref_v0 *ref0;
1050         struct btrfs_tree_block_info *bi;
1051         struct extent_buffer *leaf;
1052         struct btrfs_key key;
1053         struct btrfs_key found_key;
1054         u32 new_size = sizeof(*item);
1055         u64 refs;
1056         int ret;
1057
1058         leaf = path->nodes[0];
1059         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1060
1061         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1062         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1063                              struct btrfs_extent_item_v0);
1064         refs = btrfs_extent_refs_v0(leaf, ei0);
1065
1066         if (owner == (u64)-1) {
1067                 while (1) {
1068                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1069                                 ret = btrfs_next_leaf(root, path);
1070                                 if (ret < 0)
1071                                         return ret;
1072                                 BUG_ON(ret > 0); /* Corruption */
1073                                 leaf = path->nodes[0];
1074                         }
1075                         btrfs_item_key_to_cpu(leaf, &found_key,
1076                                               path->slots[0]);
1077                         BUG_ON(key.objectid != found_key.objectid);
1078                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1079                                 path->slots[0]++;
1080                                 continue;
1081                         }
1082                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1083                                               struct btrfs_extent_ref_v0);
1084                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1085                         break;
1086                 }
1087         }
1088         btrfs_release_path(path);
1089
1090         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1091                 new_size += sizeof(*bi);
1092
1093         new_size -= sizeof(*ei0);
1094         ret = btrfs_search_slot(trans, root, &key, path,
1095                                 new_size + extra_size, 1);
1096         if (ret < 0)
1097                 return ret;
1098         BUG_ON(ret); /* Corruption */
1099
1100         btrfs_extend_item(root, path, new_size);
1101
1102         leaf = path->nodes[0];
1103         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1104         btrfs_set_extent_refs(leaf, item, refs);
1105         /* FIXME: get real generation */
1106         btrfs_set_extent_generation(leaf, item, 0);
1107         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1108                 btrfs_set_extent_flags(leaf, item,
1109                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1110                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1111                 bi = (struct btrfs_tree_block_info *)(item + 1);
1112                 /* FIXME: get first key of the block */
1113                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1114                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1115         } else {
1116                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1117         }
1118         btrfs_mark_buffer_dirty(leaf);
1119         return 0;
1120 }
1121 #endif
1122
1123 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1124 {
1125         u32 high_crc = ~(u32)0;
1126         u32 low_crc = ~(u32)0;
1127         __le64 lenum;
1128
1129         lenum = cpu_to_le64(root_objectid);
1130         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1131         lenum = cpu_to_le64(owner);
1132         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1133         lenum = cpu_to_le64(offset);
1134         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1135
1136         return ((u64)high_crc << 31) ^ (u64)low_crc;
1137 }
1138
1139 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1140                                      struct btrfs_extent_data_ref *ref)
1141 {
1142         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1143                                     btrfs_extent_data_ref_objectid(leaf, ref),
1144                                     btrfs_extent_data_ref_offset(leaf, ref));
1145 }
1146
1147 static int match_extent_data_ref(struct extent_buffer *leaf,
1148                                  struct btrfs_extent_data_ref *ref,
1149                                  u64 root_objectid, u64 owner, u64 offset)
1150 {
1151         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1152             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1153             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1154                 return 0;
1155         return 1;
1156 }
1157
1158 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1159                                            struct btrfs_root *root,
1160                                            struct btrfs_path *path,
1161                                            u64 bytenr, u64 parent,
1162                                            u64 root_objectid,
1163                                            u64 owner, u64 offset)
1164 {
1165         struct btrfs_key key;
1166         struct btrfs_extent_data_ref *ref;
1167         struct extent_buffer *leaf;
1168         u32 nritems;
1169         int ret;
1170         int recow;
1171         int err = -ENOENT;
1172
1173         key.objectid = bytenr;
1174         if (parent) {
1175                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1176                 key.offset = parent;
1177         } else {
1178                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1179                 key.offset = hash_extent_data_ref(root_objectid,
1180                                                   owner, offset);
1181         }
1182 again:
1183         recow = 0;
1184         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1185         if (ret < 0) {
1186                 err = ret;
1187                 goto fail;
1188         }
1189
1190         if (parent) {
1191                 if (!ret)
1192                         return 0;
1193 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1194                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1195                 btrfs_release_path(path);
1196                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1197                 if (ret < 0) {
1198                         err = ret;
1199                         goto fail;
1200                 }
1201                 if (!ret)
1202                         return 0;
1203 #endif
1204                 goto fail;
1205         }
1206
1207         leaf = path->nodes[0];
1208         nritems = btrfs_header_nritems(leaf);
1209         while (1) {
1210                 if (path->slots[0] >= nritems) {
1211                         ret = btrfs_next_leaf(root, path);
1212                         if (ret < 0)
1213                                 err = ret;
1214                         if (ret)
1215                                 goto fail;
1216
1217                         leaf = path->nodes[0];
1218                         nritems = btrfs_header_nritems(leaf);
1219                         recow = 1;
1220                 }
1221
1222                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1223                 if (key.objectid != bytenr ||
1224                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1225                         goto fail;
1226
1227                 ref = btrfs_item_ptr(leaf, path->slots[0],
1228                                      struct btrfs_extent_data_ref);
1229
1230                 if (match_extent_data_ref(leaf, ref, root_objectid,
1231                                           owner, offset)) {
1232                         if (recow) {
1233                                 btrfs_release_path(path);
1234                                 goto again;
1235                         }
1236                         err = 0;
1237                         break;
1238                 }
1239                 path->slots[0]++;
1240         }
1241 fail:
1242         return err;
1243 }
1244
1245 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1246                                            struct btrfs_root *root,
1247                                            struct btrfs_path *path,
1248                                            u64 bytenr, u64 parent,
1249                                            u64 root_objectid, u64 owner,
1250                                            u64 offset, int refs_to_add)
1251 {
1252         struct btrfs_key key;
1253         struct extent_buffer *leaf;
1254         u32 size;
1255         u32 num_refs;
1256         int ret;
1257
1258         key.objectid = bytenr;
1259         if (parent) {
1260                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1261                 key.offset = parent;
1262                 size = sizeof(struct btrfs_shared_data_ref);
1263         } else {
1264                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1265                 key.offset = hash_extent_data_ref(root_objectid,
1266                                                   owner, offset);
1267                 size = sizeof(struct btrfs_extent_data_ref);
1268         }
1269
1270         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1271         if (ret && ret != -EEXIST)
1272                 goto fail;
1273
1274         leaf = path->nodes[0];
1275         if (parent) {
1276                 struct btrfs_shared_data_ref *ref;
1277                 ref = btrfs_item_ptr(leaf, path->slots[0],
1278                                      struct btrfs_shared_data_ref);
1279                 if (ret == 0) {
1280                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1281                 } else {
1282                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1283                         num_refs += refs_to_add;
1284                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1285                 }
1286         } else {
1287                 struct btrfs_extent_data_ref *ref;
1288                 while (ret == -EEXIST) {
1289                         ref = btrfs_item_ptr(leaf, path->slots[0],
1290                                              struct btrfs_extent_data_ref);
1291                         if (match_extent_data_ref(leaf, ref, root_objectid,
1292                                                   owner, offset))
1293                                 break;
1294                         btrfs_release_path(path);
1295                         key.offset++;
1296                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1297                                                       size);
1298                         if (ret && ret != -EEXIST)
1299                                 goto fail;
1300
1301                         leaf = path->nodes[0];
1302                 }
1303                 ref = btrfs_item_ptr(leaf, path->slots[0],
1304                                      struct btrfs_extent_data_ref);
1305                 if (ret == 0) {
1306                         btrfs_set_extent_data_ref_root(leaf, ref,
1307                                                        root_objectid);
1308                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1309                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1310                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1311                 } else {
1312                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1313                         num_refs += refs_to_add;
1314                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1315                 }
1316         }
1317         btrfs_mark_buffer_dirty(leaf);
1318         ret = 0;
1319 fail:
1320         btrfs_release_path(path);
1321         return ret;
1322 }
1323
1324 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1325                                            struct btrfs_root *root,
1326                                            struct btrfs_path *path,
1327                                            int refs_to_drop, int *last_ref)
1328 {
1329         struct btrfs_key key;
1330         struct btrfs_extent_data_ref *ref1 = NULL;
1331         struct btrfs_shared_data_ref *ref2 = NULL;
1332         struct extent_buffer *leaf;
1333         u32 num_refs = 0;
1334         int ret = 0;
1335
1336         leaf = path->nodes[0];
1337         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1338
1339         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1340                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1341                                       struct btrfs_extent_data_ref);
1342                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1343         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1344                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1345                                       struct btrfs_shared_data_ref);
1346                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1348         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1349                 struct btrfs_extent_ref_v0 *ref0;
1350                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_ref_v0);
1352                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1353 #endif
1354         } else {
1355                 BUG();
1356         }
1357
1358         BUG_ON(num_refs < refs_to_drop);
1359         num_refs -= refs_to_drop;
1360
1361         if (num_refs == 0) {
1362                 ret = btrfs_del_item(trans, root, path);
1363                 *last_ref = 1;
1364         } else {
1365                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1366                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1367                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1368                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1369 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1370                 else {
1371                         struct btrfs_extent_ref_v0 *ref0;
1372                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1373                                         struct btrfs_extent_ref_v0);
1374                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1375                 }
1376 #endif
1377                 btrfs_mark_buffer_dirty(leaf);
1378         }
1379         return ret;
1380 }
1381
1382 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1383                                           struct btrfs_extent_inline_ref *iref)
1384 {
1385         struct btrfs_key key;
1386         struct extent_buffer *leaf;
1387         struct btrfs_extent_data_ref *ref1;
1388         struct btrfs_shared_data_ref *ref2;
1389         u32 num_refs = 0;
1390
1391         leaf = path->nodes[0];
1392         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1393         if (iref) {
1394                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1395                     BTRFS_EXTENT_DATA_REF_KEY) {
1396                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1397                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1398                 } else {
1399                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1400                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1401                 }
1402         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1403                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1404                                       struct btrfs_extent_data_ref);
1405                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1406         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1407                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1408                                       struct btrfs_shared_data_ref);
1409                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1410 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1411         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1412                 struct btrfs_extent_ref_v0 *ref0;
1413                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1414                                       struct btrfs_extent_ref_v0);
1415                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1416 #endif
1417         } else {
1418                 WARN_ON(1);
1419         }
1420         return num_refs;
1421 }
1422
1423 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1424                                           struct btrfs_root *root,
1425                                           struct btrfs_path *path,
1426                                           u64 bytenr, u64 parent,
1427                                           u64 root_objectid)
1428 {
1429         struct btrfs_key key;
1430         int ret;
1431
1432         key.objectid = bytenr;
1433         if (parent) {
1434                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1435                 key.offset = parent;
1436         } else {
1437                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1438                 key.offset = root_objectid;
1439         }
1440
1441         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1442         if (ret > 0)
1443                 ret = -ENOENT;
1444 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1445         if (ret == -ENOENT && parent) {
1446                 btrfs_release_path(path);
1447                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1448                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1449                 if (ret > 0)
1450                         ret = -ENOENT;
1451         }
1452 #endif
1453         return ret;
1454 }
1455
1456 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1457                                           struct btrfs_root *root,
1458                                           struct btrfs_path *path,
1459                                           u64 bytenr, u64 parent,
1460                                           u64 root_objectid)
1461 {
1462         struct btrfs_key key;
1463         int ret;
1464
1465         key.objectid = bytenr;
1466         if (parent) {
1467                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1468                 key.offset = parent;
1469         } else {
1470                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1471                 key.offset = root_objectid;
1472         }
1473
1474         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1475         btrfs_release_path(path);
1476         return ret;
1477 }
1478
1479 static inline int extent_ref_type(u64 parent, u64 owner)
1480 {
1481         int type;
1482         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1483                 if (parent > 0)
1484                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1485                 else
1486                         type = BTRFS_TREE_BLOCK_REF_KEY;
1487         } else {
1488                 if (parent > 0)
1489                         type = BTRFS_SHARED_DATA_REF_KEY;
1490                 else
1491                         type = BTRFS_EXTENT_DATA_REF_KEY;
1492         }
1493         return type;
1494 }
1495
1496 static int find_next_key(struct btrfs_path *path, int level,
1497                          struct btrfs_key *key)
1498
1499 {
1500         for (; level < BTRFS_MAX_LEVEL; level++) {
1501                 if (!path->nodes[level])
1502                         break;
1503                 if (path->slots[level] + 1 >=
1504                     btrfs_header_nritems(path->nodes[level]))
1505                         continue;
1506                 if (level == 0)
1507                         btrfs_item_key_to_cpu(path->nodes[level], key,
1508                                               path->slots[level] + 1);
1509                 else
1510                         btrfs_node_key_to_cpu(path->nodes[level], key,
1511                                               path->slots[level] + 1);
1512                 return 0;
1513         }
1514         return 1;
1515 }
1516
1517 /*
1518  * look for inline back ref. if back ref is found, *ref_ret is set
1519  * to the address of inline back ref, and 0 is returned.
1520  *
1521  * if back ref isn't found, *ref_ret is set to the address where it
1522  * should be inserted, and -ENOENT is returned.
1523  *
1524  * if insert is true and there are too many inline back refs, the path
1525  * points to the extent item, and -EAGAIN is returned.
1526  *
1527  * NOTE: inline back refs are ordered in the same way that back ref
1528  *       items in the tree are ordered.
1529  */
1530 static noinline_for_stack
1531 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1532                                  struct btrfs_root *root,
1533                                  struct btrfs_path *path,
1534                                  struct btrfs_extent_inline_ref **ref_ret,
1535                                  u64 bytenr, u64 num_bytes,
1536                                  u64 parent, u64 root_objectid,
1537                                  u64 owner, u64 offset, int insert)
1538 {
1539         struct btrfs_key key;
1540         struct extent_buffer *leaf;
1541         struct btrfs_extent_item *ei;
1542         struct btrfs_extent_inline_ref *iref;
1543         u64 flags;
1544         u64 item_size;
1545         unsigned long ptr;
1546         unsigned long end;
1547         int extra_size;
1548         int type;
1549         int want;
1550         int ret;
1551         int err = 0;
1552         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1553                                                  SKINNY_METADATA);
1554
1555         key.objectid = bytenr;
1556         key.type = BTRFS_EXTENT_ITEM_KEY;
1557         key.offset = num_bytes;
1558
1559         want = extent_ref_type(parent, owner);
1560         if (insert) {
1561                 extra_size = btrfs_extent_inline_ref_size(want);
1562                 path->keep_locks = 1;
1563         } else
1564                 extra_size = -1;
1565
1566         /*
1567          * Owner is our parent level, so we can just add one to get the level
1568          * for the block we are interested in.
1569          */
1570         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1571                 key.type = BTRFS_METADATA_ITEM_KEY;
1572                 key.offset = owner;
1573         }
1574
1575 again:
1576         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1577         if (ret < 0) {
1578                 err = ret;
1579                 goto out;
1580         }
1581
1582         /*
1583          * We may be a newly converted file system which still has the old fat
1584          * extent entries for metadata, so try and see if we have one of those.
1585          */
1586         if (ret > 0 && skinny_metadata) {
1587                 skinny_metadata = false;
1588                 if (path->slots[0]) {
1589                         path->slots[0]--;
1590                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1591                                               path->slots[0]);
1592                         if (key.objectid == bytenr &&
1593                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1594                             key.offset == num_bytes)
1595                                 ret = 0;
1596                 }
1597                 if (ret) {
1598                         key.objectid = bytenr;
1599                         key.type = BTRFS_EXTENT_ITEM_KEY;
1600                         key.offset = num_bytes;
1601                         btrfs_release_path(path);
1602                         goto again;
1603                 }
1604         }
1605
1606         if (ret && !insert) {
1607                 err = -ENOENT;
1608                 goto out;
1609         } else if (WARN_ON(ret)) {
1610                 err = -EIO;
1611                 goto out;
1612         }
1613
1614         leaf = path->nodes[0];
1615         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1616 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1617         if (item_size < sizeof(*ei)) {
1618                 if (!insert) {
1619                         err = -ENOENT;
1620                         goto out;
1621                 }
1622                 ret = convert_extent_item_v0(trans, root, path, owner,
1623                                              extra_size);
1624                 if (ret < 0) {
1625                         err = ret;
1626                         goto out;
1627                 }
1628                 leaf = path->nodes[0];
1629                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1630         }
1631 #endif
1632         BUG_ON(item_size < sizeof(*ei));
1633
1634         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1635         flags = btrfs_extent_flags(leaf, ei);
1636
1637         ptr = (unsigned long)(ei + 1);
1638         end = (unsigned long)ei + item_size;
1639
1640         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1641                 ptr += sizeof(struct btrfs_tree_block_info);
1642                 BUG_ON(ptr > end);
1643         }
1644
1645         err = -ENOENT;
1646         while (1) {
1647                 if (ptr >= end) {
1648                         WARN_ON(ptr > end);
1649                         break;
1650                 }
1651                 iref = (struct btrfs_extent_inline_ref *)ptr;
1652                 type = btrfs_extent_inline_ref_type(leaf, iref);
1653                 if (want < type)
1654                         break;
1655                 if (want > type) {
1656                         ptr += btrfs_extent_inline_ref_size(type);
1657                         continue;
1658                 }
1659
1660                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1661                         struct btrfs_extent_data_ref *dref;
1662                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1663                         if (match_extent_data_ref(leaf, dref, root_objectid,
1664                                                   owner, offset)) {
1665                                 err = 0;
1666                                 break;
1667                         }
1668                         if (hash_extent_data_ref_item(leaf, dref) <
1669                             hash_extent_data_ref(root_objectid, owner, offset))
1670                                 break;
1671                 } else {
1672                         u64 ref_offset;
1673                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1674                         if (parent > 0) {
1675                                 if (parent == ref_offset) {
1676                                         err = 0;
1677                                         break;
1678                                 }
1679                                 if (ref_offset < parent)
1680                                         break;
1681                         } else {
1682                                 if (root_objectid == ref_offset) {
1683                                         err = 0;
1684                                         break;
1685                                 }
1686                                 if (ref_offset < root_objectid)
1687                                         break;
1688                         }
1689                 }
1690                 ptr += btrfs_extent_inline_ref_size(type);
1691         }
1692         if (err == -ENOENT && insert) {
1693                 if (item_size + extra_size >=
1694                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1695                         err = -EAGAIN;
1696                         goto out;
1697                 }
1698                 /*
1699                  * To add new inline back ref, we have to make sure
1700                  * there is no corresponding back ref item.
1701                  * For simplicity, we just do not add new inline back
1702                  * ref if there is any kind of item for this block
1703                  */
1704                 if (find_next_key(path, 0, &key) == 0 &&
1705                     key.objectid == bytenr &&
1706                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1707                         err = -EAGAIN;
1708                         goto out;
1709                 }
1710         }
1711         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1712 out:
1713         if (insert) {
1714                 path->keep_locks = 0;
1715                 btrfs_unlock_up_safe(path, 1);
1716         }
1717         return err;
1718 }
1719
1720 /*
1721  * helper to add new inline back ref
1722  */
1723 static noinline_for_stack
1724 void setup_inline_extent_backref(struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref *iref,
1727                                  u64 parent, u64 root_objectid,
1728                                  u64 owner, u64 offset, int refs_to_add,
1729                                  struct btrfs_delayed_extent_op *extent_op)
1730 {
1731         struct extent_buffer *leaf;
1732         struct btrfs_extent_item *ei;
1733         unsigned long ptr;
1734         unsigned long end;
1735         unsigned long item_offset;
1736         u64 refs;
1737         int size;
1738         int type;
1739
1740         leaf = path->nodes[0];
1741         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1742         item_offset = (unsigned long)iref - (unsigned long)ei;
1743
1744         type = extent_ref_type(parent, owner);
1745         size = btrfs_extent_inline_ref_size(type);
1746
1747         btrfs_extend_item(root, path, size);
1748
1749         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1750         refs = btrfs_extent_refs(leaf, ei);
1751         refs += refs_to_add;
1752         btrfs_set_extent_refs(leaf, ei, refs);
1753         if (extent_op)
1754                 __run_delayed_extent_op(extent_op, leaf, ei);
1755
1756         ptr = (unsigned long)ei + item_offset;
1757         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1758         if (ptr < end - size)
1759                 memmove_extent_buffer(leaf, ptr + size, ptr,
1760                                       end - size - ptr);
1761
1762         iref = (struct btrfs_extent_inline_ref *)ptr;
1763         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1764         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1765                 struct btrfs_extent_data_ref *dref;
1766                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1767                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1768                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1769                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1770                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1771         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1772                 struct btrfs_shared_data_ref *sref;
1773                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1774                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1775                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1776         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1777                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1778         } else {
1779                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1780         }
1781         btrfs_mark_buffer_dirty(leaf);
1782 }
1783
1784 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1785                                  struct btrfs_root *root,
1786                                  struct btrfs_path *path,
1787                                  struct btrfs_extent_inline_ref **ref_ret,
1788                                  u64 bytenr, u64 num_bytes, u64 parent,
1789                                  u64 root_objectid, u64 owner, u64 offset)
1790 {
1791         int ret;
1792
1793         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1794                                            bytenr, num_bytes, parent,
1795                                            root_objectid, owner, offset, 0);
1796         if (ret != -ENOENT)
1797                 return ret;
1798
1799         btrfs_release_path(path);
1800         *ref_ret = NULL;
1801
1802         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1803                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1804                                             root_objectid);
1805         } else {
1806                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1807                                              root_objectid, owner, offset);
1808         }
1809         return ret;
1810 }
1811
1812 /*
1813  * helper to update/remove inline back ref
1814  */
1815 static noinline_for_stack
1816 void update_inline_extent_backref(struct btrfs_root *root,
1817                                   struct btrfs_path *path,
1818                                   struct btrfs_extent_inline_ref *iref,
1819                                   int refs_to_mod,
1820                                   struct btrfs_delayed_extent_op *extent_op,
1821                                   int *last_ref)
1822 {
1823         struct extent_buffer *leaf;
1824         struct btrfs_extent_item *ei;
1825         struct btrfs_extent_data_ref *dref = NULL;
1826         struct btrfs_shared_data_ref *sref = NULL;
1827         unsigned long ptr;
1828         unsigned long end;
1829         u32 item_size;
1830         int size;
1831         int type;
1832         u64 refs;
1833
1834         leaf = path->nodes[0];
1835         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836         refs = btrfs_extent_refs(leaf, ei);
1837         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1838         refs += refs_to_mod;
1839         btrfs_set_extent_refs(leaf, ei, refs);
1840         if (extent_op)
1841                 __run_delayed_extent_op(extent_op, leaf, ei);
1842
1843         type = btrfs_extent_inline_ref_type(leaf, iref);
1844
1845         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1846                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1847                 refs = btrfs_extent_data_ref_count(leaf, dref);
1848         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1849                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1850                 refs = btrfs_shared_data_ref_count(leaf, sref);
1851         } else {
1852                 refs = 1;
1853                 BUG_ON(refs_to_mod != -1);
1854         }
1855
1856         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1857         refs += refs_to_mod;
1858
1859         if (refs > 0) {
1860                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1861                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1862                 else
1863                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1864         } else {
1865                 *last_ref = 1;
1866                 size =  btrfs_extent_inline_ref_size(type);
1867                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1868                 ptr = (unsigned long)iref;
1869                 end = (unsigned long)ei + item_size;
1870                 if (ptr + size < end)
1871                         memmove_extent_buffer(leaf, ptr, ptr + size,
1872                                               end - ptr - size);
1873                 item_size -= size;
1874                 btrfs_truncate_item(root, path, item_size, 1);
1875         }
1876         btrfs_mark_buffer_dirty(leaf);
1877 }
1878
1879 static noinline_for_stack
1880 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1881                                  struct btrfs_root *root,
1882                                  struct btrfs_path *path,
1883                                  u64 bytenr, u64 num_bytes, u64 parent,
1884                                  u64 root_objectid, u64 owner,
1885                                  u64 offset, int refs_to_add,
1886                                  struct btrfs_delayed_extent_op *extent_op)
1887 {
1888         struct btrfs_extent_inline_ref *iref;
1889         int ret;
1890
1891         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1892                                            bytenr, num_bytes, parent,
1893                                            root_objectid, owner, offset, 1);
1894         if (ret == 0) {
1895                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1896                 update_inline_extent_backref(root, path, iref,
1897                                              refs_to_add, extent_op, NULL);
1898         } else if (ret == -ENOENT) {
1899                 setup_inline_extent_backref(root, path, iref, parent,
1900                                             root_objectid, owner, offset,
1901                                             refs_to_add, extent_op);
1902                 ret = 0;
1903         }
1904         return ret;
1905 }
1906
1907 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1908                                  struct btrfs_root *root,
1909                                  struct btrfs_path *path,
1910                                  u64 bytenr, u64 parent, u64 root_objectid,
1911                                  u64 owner, u64 offset, int refs_to_add)
1912 {
1913         int ret;
1914         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1915                 BUG_ON(refs_to_add != 1);
1916                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1917                                             parent, root_objectid);
1918         } else {
1919                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1920                                              parent, root_objectid,
1921                                              owner, offset, refs_to_add);
1922         }
1923         return ret;
1924 }
1925
1926 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1927                                  struct btrfs_root *root,
1928                                  struct btrfs_path *path,
1929                                  struct btrfs_extent_inline_ref *iref,
1930                                  int refs_to_drop, int is_data, int *last_ref)
1931 {
1932         int ret = 0;
1933
1934         BUG_ON(!is_data && refs_to_drop != 1);
1935         if (iref) {
1936                 update_inline_extent_backref(root, path, iref,
1937                                              -refs_to_drop, NULL, last_ref);
1938         } else if (is_data) {
1939                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1940                                              last_ref);
1941         } else {
1942                 *last_ref = 1;
1943                 ret = btrfs_del_item(trans, root, path);
1944         }
1945         return ret;
1946 }
1947
1948 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1949 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1950                                u64 *discarded_bytes)
1951 {
1952         int j, ret = 0;
1953         u64 bytes_left, end;
1954         u64 aligned_start = ALIGN(start, 1 << 9);
1955
1956         if (WARN_ON(start != aligned_start)) {
1957                 len -= aligned_start - start;
1958                 len = round_down(len, 1 << 9);
1959                 start = aligned_start;
1960         }
1961
1962         *discarded_bytes = 0;
1963
1964         if (!len)
1965                 return 0;
1966
1967         end = start + len;
1968         bytes_left = len;
1969
1970         /* Skip any superblocks on this device. */
1971         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1972                 u64 sb_start = btrfs_sb_offset(j);
1973                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1974                 u64 size = sb_start - start;
1975
1976                 if (!in_range(sb_start, start, bytes_left) &&
1977                     !in_range(sb_end, start, bytes_left) &&
1978                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1979                         continue;
1980
1981                 /*
1982                  * Superblock spans beginning of range.  Adjust start and
1983                  * try again.
1984                  */
1985                 if (sb_start <= start) {
1986                         start += sb_end - start;
1987                         if (start > end) {
1988                                 bytes_left = 0;
1989                                 break;
1990                         }
1991                         bytes_left = end - start;
1992                         continue;
1993                 }
1994
1995                 if (size) {
1996                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1997                                                    GFP_NOFS, 0);
1998                         if (!ret)
1999                                 *discarded_bytes += size;
2000                         else if (ret != -EOPNOTSUPP)
2001                                 return ret;
2002                 }
2003
2004                 start = sb_end;
2005                 if (start > end) {
2006                         bytes_left = 0;
2007                         break;
2008                 }
2009                 bytes_left = end - start;
2010         }
2011
2012         if (bytes_left) {
2013                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2014                                            GFP_NOFS, 0);
2015                 if (!ret)
2016                         *discarded_bytes += bytes_left;
2017         }
2018         return ret;
2019 }
2020
2021 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2022                          u64 num_bytes, u64 *actual_bytes)
2023 {
2024         int ret;
2025         u64 discarded_bytes = 0;
2026         struct btrfs_bio *bbio = NULL;
2027
2028
2029         /* Tell the block device(s) that the sectors can be discarded */
2030         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2031                               bytenr, &num_bytes, &bbio, 0);
2032         /* Error condition is -ENOMEM */
2033         if (!ret) {
2034                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2035                 int i;
2036
2037
2038                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2039                         u64 bytes;
2040                         if (!stripe->dev->can_discard)
2041                                 continue;
2042
2043                         ret = btrfs_issue_discard(stripe->dev->bdev,
2044                                                   stripe->physical,
2045                                                   stripe->length,
2046                                                   &bytes);
2047                         if (!ret)
2048                                 discarded_bytes += bytes;
2049                         else if (ret != -EOPNOTSUPP)
2050                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2051
2052                         /*
2053                          * Just in case we get back EOPNOTSUPP for some reason,
2054                          * just ignore the return value so we don't screw up
2055                          * people calling discard_extent.
2056                          */
2057                         ret = 0;
2058                 }
2059                 btrfs_put_bbio(bbio);
2060         }
2061
2062         if (actual_bytes)
2063                 *actual_bytes = discarded_bytes;
2064
2065
2066         if (ret == -EOPNOTSUPP)
2067                 ret = 0;
2068         return ret;
2069 }
2070
2071 /* Can return -ENOMEM */
2072 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2073                          struct btrfs_root *root,
2074                          u64 bytenr, u64 num_bytes, u64 parent,
2075                          u64 root_objectid, u64 owner, u64 offset)
2076 {
2077         int ret;
2078         struct btrfs_fs_info *fs_info = root->fs_info;
2079
2080         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2081                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2082
2083         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2084                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2085                                         num_bytes,
2086                                         parent, root_objectid, (int)owner,
2087                                         BTRFS_ADD_DELAYED_REF, NULL);
2088         } else {
2089                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2090                                         num_bytes, parent, root_objectid,
2091                                         owner, offset, 0,
2092                                         BTRFS_ADD_DELAYED_REF, NULL);
2093         }
2094         return ret;
2095 }
2096
2097 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2098                                   struct btrfs_root *root,
2099                                   struct btrfs_delayed_ref_node *node,
2100                                   u64 parent, u64 root_objectid,
2101                                   u64 owner, u64 offset, int refs_to_add,
2102                                   struct btrfs_delayed_extent_op *extent_op)
2103 {
2104         struct btrfs_fs_info *fs_info = root->fs_info;
2105         struct btrfs_path *path;
2106         struct extent_buffer *leaf;
2107         struct btrfs_extent_item *item;
2108         struct btrfs_key key;
2109         u64 bytenr = node->bytenr;
2110         u64 num_bytes = node->num_bytes;
2111         u64 refs;
2112         int ret;
2113
2114         path = btrfs_alloc_path();
2115         if (!path)
2116                 return -ENOMEM;
2117
2118         path->reada = 1;
2119         path->leave_spinning = 1;
2120         /* this will setup the path even if it fails to insert the back ref */
2121         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2122                                            bytenr, num_bytes, parent,
2123                                            root_objectid, owner, offset,
2124                                            refs_to_add, extent_op);
2125         if ((ret < 0 && ret != -EAGAIN) || !ret)
2126                 goto out;
2127
2128         /*
2129          * Ok we had -EAGAIN which means we didn't have space to insert and
2130          * inline extent ref, so just update the reference count and add a
2131          * normal backref.
2132          */
2133         leaf = path->nodes[0];
2134         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2135         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2136         refs = btrfs_extent_refs(leaf, item);
2137         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2138         if (extent_op)
2139                 __run_delayed_extent_op(extent_op, leaf, item);
2140
2141         btrfs_mark_buffer_dirty(leaf);
2142         btrfs_release_path(path);
2143
2144         path->reada = 1;
2145         path->leave_spinning = 1;
2146         /* now insert the actual backref */
2147         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2148                                     path, bytenr, parent, root_objectid,
2149                                     owner, offset, refs_to_add);
2150         if (ret)
2151                 btrfs_abort_transaction(trans, root, ret);
2152 out:
2153         btrfs_free_path(path);
2154         return ret;
2155 }
2156
2157 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2158                                 struct btrfs_root *root,
2159                                 struct btrfs_delayed_ref_node *node,
2160                                 struct btrfs_delayed_extent_op *extent_op,
2161                                 int insert_reserved)
2162 {
2163         int ret = 0;
2164         struct btrfs_delayed_data_ref *ref;
2165         struct btrfs_key ins;
2166         u64 parent = 0;
2167         u64 ref_root = 0;
2168         u64 flags = 0;
2169
2170         ins.objectid = node->bytenr;
2171         ins.offset = node->num_bytes;
2172         ins.type = BTRFS_EXTENT_ITEM_KEY;
2173
2174         ref = btrfs_delayed_node_to_data_ref(node);
2175         trace_run_delayed_data_ref(node, ref, node->action);
2176
2177         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2178                 parent = ref->parent;
2179         ref_root = ref->root;
2180
2181         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2182                 if (extent_op)
2183                         flags |= extent_op->flags_to_set;
2184                 ret = alloc_reserved_file_extent(trans, root,
2185                                                  parent, ref_root, flags,
2186                                                  ref->objectid, ref->offset,
2187                                                  &ins, node->ref_mod);
2188         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2189                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2190                                              ref_root, ref->objectid,
2191                                              ref->offset, node->ref_mod,
2192                                              extent_op);
2193         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2194                 ret = __btrfs_free_extent(trans, root, node, parent,
2195                                           ref_root, ref->objectid,
2196                                           ref->offset, node->ref_mod,
2197                                           extent_op);
2198         } else {
2199                 BUG();
2200         }
2201         return ret;
2202 }
2203
2204 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2205                                     struct extent_buffer *leaf,
2206                                     struct btrfs_extent_item *ei)
2207 {
2208         u64 flags = btrfs_extent_flags(leaf, ei);
2209         if (extent_op->update_flags) {
2210                 flags |= extent_op->flags_to_set;
2211                 btrfs_set_extent_flags(leaf, ei, flags);
2212         }
2213
2214         if (extent_op->update_key) {
2215                 struct btrfs_tree_block_info *bi;
2216                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2217                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2218                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2219         }
2220 }
2221
2222 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2223                                  struct btrfs_root *root,
2224                                  struct btrfs_delayed_ref_node *node,
2225                                  struct btrfs_delayed_extent_op *extent_op)
2226 {
2227         struct btrfs_key key;
2228         struct btrfs_path *path;
2229         struct btrfs_extent_item *ei;
2230         struct extent_buffer *leaf;
2231         u32 item_size;
2232         int ret;
2233         int err = 0;
2234         int metadata = !extent_op->is_data;
2235
2236         if (trans->aborted)
2237                 return 0;
2238
2239         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2240                 metadata = 0;
2241
2242         path = btrfs_alloc_path();
2243         if (!path)
2244                 return -ENOMEM;
2245
2246         key.objectid = node->bytenr;
2247
2248         if (metadata) {
2249                 key.type = BTRFS_METADATA_ITEM_KEY;
2250                 key.offset = extent_op->level;
2251         } else {
2252                 key.type = BTRFS_EXTENT_ITEM_KEY;
2253                 key.offset = node->num_bytes;
2254         }
2255
2256 again:
2257         path->reada = 1;
2258         path->leave_spinning = 1;
2259         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2260                                 path, 0, 1);
2261         if (ret < 0) {
2262                 err = ret;
2263                 goto out;
2264         }
2265         if (ret > 0) {
2266                 if (metadata) {
2267                         if (path->slots[0] > 0) {
2268                                 path->slots[0]--;
2269                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2270                                                       path->slots[0]);
2271                                 if (key.objectid == node->bytenr &&
2272                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2273                                     key.offset == node->num_bytes)
2274                                         ret = 0;
2275                         }
2276                         if (ret > 0) {
2277                                 btrfs_release_path(path);
2278                                 metadata = 0;
2279
2280                                 key.objectid = node->bytenr;
2281                                 key.offset = node->num_bytes;
2282                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2283                                 goto again;
2284                         }
2285                 } else {
2286                         err = -EIO;
2287                         goto out;
2288                 }
2289         }
2290
2291         leaf = path->nodes[0];
2292         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2294         if (item_size < sizeof(*ei)) {
2295                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2296                                              path, (u64)-1, 0);
2297                 if (ret < 0) {
2298                         err = ret;
2299                         goto out;
2300                 }
2301                 leaf = path->nodes[0];
2302                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2303         }
2304 #endif
2305         BUG_ON(item_size < sizeof(*ei));
2306         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2307         __run_delayed_extent_op(extent_op, leaf, ei);
2308
2309         btrfs_mark_buffer_dirty(leaf);
2310 out:
2311         btrfs_free_path(path);
2312         return err;
2313 }
2314
2315 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2316                                 struct btrfs_root *root,
2317                                 struct btrfs_delayed_ref_node *node,
2318                                 struct btrfs_delayed_extent_op *extent_op,
2319                                 int insert_reserved)
2320 {
2321         int ret = 0;
2322         struct btrfs_delayed_tree_ref *ref;
2323         struct btrfs_key ins;
2324         u64 parent = 0;
2325         u64 ref_root = 0;
2326         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2327                                                  SKINNY_METADATA);
2328
2329         ref = btrfs_delayed_node_to_tree_ref(node);
2330         trace_run_delayed_tree_ref(node, ref, node->action);
2331
2332         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2333                 parent = ref->parent;
2334         ref_root = ref->root;
2335
2336         ins.objectid = node->bytenr;
2337         if (skinny_metadata) {
2338                 ins.offset = ref->level;
2339                 ins.type = BTRFS_METADATA_ITEM_KEY;
2340         } else {
2341                 ins.offset = node->num_bytes;
2342                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2343         }
2344
2345         BUG_ON(node->ref_mod != 1);
2346         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2347                 BUG_ON(!extent_op || !extent_op->update_flags);
2348                 ret = alloc_reserved_tree_block(trans, root,
2349                                                 parent, ref_root,
2350                                                 extent_op->flags_to_set,
2351                                                 &extent_op->key,
2352                                                 ref->level, &ins);
2353         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2354                 ret = __btrfs_inc_extent_ref(trans, root, node,
2355                                              parent, ref_root,
2356                                              ref->level, 0, 1,
2357                                              extent_op);
2358         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2359                 ret = __btrfs_free_extent(trans, root, node,
2360                                           parent, ref_root,
2361                                           ref->level, 0, 1, extent_op);
2362         } else {
2363                 BUG();
2364         }
2365         return ret;
2366 }
2367
2368 /* helper function to actually process a single delayed ref entry */
2369 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2370                                struct btrfs_root *root,
2371                                struct btrfs_delayed_ref_node *node,
2372                                struct btrfs_delayed_extent_op *extent_op,
2373                                int insert_reserved)
2374 {
2375         int ret = 0;
2376
2377         if (trans->aborted) {
2378                 if (insert_reserved)
2379                         btrfs_pin_extent(root, node->bytenr,
2380                                          node->num_bytes, 1);
2381                 return 0;
2382         }
2383
2384         if (btrfs_delayed_ref_is_head(node)) {
2385                 struct btrfs_delayed_ref_head *head;
2386                 /*
2387                  * we've hit the end of the chain and we were supposed
2388                  * to insert this extent into the tree.  But, it got
2389                  * deleted before we ever needed to insert it, so all
2390                  * we have to do is clean up the accounting
2391                  */
2392                 BUG_ON(extent_op);
2393                 head = btrfs_delayed_node_to_head(node);
2394                 trace_run_delayed_ref_head(node, head, node->action);
2395
2396                 if (insert_reserved) {
2397                         btrfs_pin_extent(root, node->bytenr,
2398                                          node->num_bytes, 1);
2399                         if (head->is_data) {
2400                                 ret = btrfs_del_csums(trans, root,
2401                                                       node->bytenr,
2402                                                       node->num_bytes);
2403                         }
2404                 }
2405
2406                 /* Also free its reserved qgroup space */
2407                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2408                                               head->qgroup_ref_root,
2409                                               head->qgroup_reserved);
2410                 return ret;
2411         }
2412
2413         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2414             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2415                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2416                                            insert_reserved);
2417         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2418                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2419                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2420                                            insert_reserved);
2421         else
2422                 BUG();
2423         return ret;
2424 }
2425
2426 static inline struct btrfs_delayed_ref_node *
2427 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2428 {
2429         struct btrfs_delayed_ref_node *ref;
2430
2431         if (list_empty(&head->ref_list))
2432                 return NULL;
2433
2434         /*
2435          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2436          * This is to prevent a ref count from going down to zero, which deletes
2437          * the extent item from the extent tree, when there still are references
2438          * to add, which would fail because they would not find the extent item.
2439          */
2440         list_for_each_entry(ref, &head->ref_list, list) {
2441                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2442                         return ref;
2443         }
2444
2445         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2446                           list);
2447 }
2448
2449 /*
2450  * Returns 0 on success or if called with an already aborted transaction.
2451  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2452  */
2453 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2454                                              struct btrfs_root *root,
2455                                              unsigned long nr)
2456 {
2457         struct btrfs_delayed_ref_root *delayed_refs;
2458         struct btrfs_delayed_ref_node *ref;
2459         struct btrfs_delayed_ref_head *locked_ref = NULL;
2460         struct btrfs_delayed_extent_op *extent_op;
2461         struct btrfs_fs_info *fs_info = root->fs_info;
2462         ktime_t start = ktime_get();
2463         int ret;
2464         unsigned long count = 0;
2465         unsigned long actual_count = 0;
2466         int must_insert_reserved = 0;
2467
2468         delayed_refs = &trans->transaction->delayed_refs;
2469         while (1) {
2470                 if (!locked_ref) {
2471                         if (count >= nr)
2472                                 break;
2473
2474                         spin_lock(&delayed_refs->lock);
2475                         locked_ref = btrfs_select_ref_head(trans);
2476                         if (!locked_ref) {
2477                                 spin_unlock(&delayed_refs->lock);
2478                                 break;
2479                         }
2480
2481                         /* grab the lock that says we are going to process
2482                          * all the refs for this head */
2483                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2484                         spin_unlock(&delayed_refs->lock);
2485                         /*
2486                          * we may have dropped the spin lock to get the head
2487                          * mutex lock, and that might have given someone else
2488                          * time to free the head.  If that's true, it has been
2489                          * removed from our list and we can move on.
2490                          */
2491                         if (ret == -EAGAIN) {
2492                                 locked_ref = NULL;
2493                                 count++;
2494                                 continue;
2495                         }
2496                 }
2497
2498                 /*
2499                  * We need to try and merge add/drops of the same ref since we
2500                  * can run into issues with relocate dropping the implicit ref
2501                  * and then it being added back again before the drop can
2502                  * finish.  If we merged anything we need to re-loop so we can
2503                  * get a good ref.
2504                  * Or we can get node references of the same type that weren't
2505                  * merged when created due to bumps in the tree mod seq, and
2506                  * we need to merge them to prevent adding an inline extent
2507                  * backref before dropping it (triggering a BUG_ON at
2508                  * insert_inline_extent_backref()).
2509                  */
2510                 spin_lock(&locked_ref->lock);
2511                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2512                                          locked_ref);
2513
2514                 /*
2515                  * locked_ref is the head node, so we have to go one
2516                  * node back for any delayed ref updates
2517                  */
2518                 ref = select_delayed_ref(locked_ref);
2519
2520                 if (ref && ref->seq &&
2521                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2522                         spin_unlock(&locked_ref->lock);
2523                         btrfs_delayed_ref_unlock(locked_ref);
2524                         spin_lock(&delayed_refs->lock);
2525                         locked_ref->processing = 0;
2526                         delayed_refs->num_heads_ready++;
2527                         spin_unlock(&delayed_refs->lock);
2528                         locked_ref = NULL;
2529                         cond_resched();
2530                         count++;
2531                         continue;
2532                 }
2533
2534                 /*
2535                  * record the must insert reserved flag before we
2536                  * drop the spin lock.
2537                  */
2538                 must_insert_reserved = locked_ref->must_insert_reserved;
2539                 locked_ref->must_insert_reserved = 0;
2540
2541                 extent_op = locked_ref->extent_op;
2542                 locked_ref->extent_op = NULL;
2543
2544                 if (!ref) {
2545
2546
2547                         /* All delayed refs have been processed, Go ahead
2548                          * and send the head node to run_one_delayed_ref,
2549                          * so that any accounting fixes can happen
2550                          */
2551                         ref = &locked_ref->node;
2552
2553                         if (extent_op && must_insert_reserved) {
2554                                 btrfs_free_delayed_extent_op(extent_op);
2555                                 extent_op = NULL;
2556                         }
2557
2558                         if (extent_op) {
2559                                 spin_unlock(&locked_ref->lock);
2560                                 ret = run_delayed_extent_op(trans, root,
2561                                                             ref, extent_op);
2562                                 btrfs_free_delayed_extent_op(extent_op);
2563
2564                                 if (ret) {
2565                                         /*
2566                                          * Need to reset must_insert_reserved if
2567                                          * there was an error so the abort stuff
2568                                          * can cleanup the reserved space
2569                                          * properly.
2570                                          */
2571                                         if (must_insert_reserved)
2572                                                 locked_ref->must_insert_reserved = 1;
2573                                         locked_ref->processing = 0;
2574                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2575                                         btrfs_delayed_ref_unlock(locked_ref);
2576                                         return ret;
2577                                 }
2578                                 continue;
2579                         }
2580
2581                         /*
2582                          * Need to drop our head ref lock and re-aqcuire the
2583                          * delayed ref lock and then re-check to make sure
2584                          * nobody got added.
2585                          */
2586                         spin_unlock(&locked_ref->lock);
2587                         spin_lock(&delayed_refs->lock);
2588                         spin_lock(&locked_ref->lock);
2589                         if (!list_empty(&locked_ref->ref_list) ||
2590                             locked_ref->extent_op) {
2591                                 spin_unlock(&locked_ref->lock);
2592                                 spin_unlock(&delayed_refs->lock);
2593                                 continue;
2594                         }
2595                         ref->in_tree = 0;
2596                         delayed_refs->num_heads--;
2597                         rb_erase(&locked_ref->href_node,
2598                                  &delayed_refs->href_root);
2599                         spin_unlock(&delayed_refs->lock);
2600                 } else {
2601                         actual_count++;
2602                         ref->in_tree = 0;
2603                         list_del(&ref->list);
2604                 }
2605                 atomic_dec(&delayed_refs->num_entries);
2606
2607                 if (!btrfs_delayed_ref_is_head(ref)) {
2608                         /*
2609                          * when we play the delayed ref, also correct the
2610                          * ref_mod on head
2611                          */
2612                         switch (ref->action) {
2613                         case BTRFS_ADD_DELAYED_REF:
2614                         case BTRFS_ADD_DELAYED_EXTENT:
2615                                 locked_ref->node.ref_mod -= ref->ref_mod;
2616                                 break;
2617                         case BTRFS_DROP_DELAYED_REF:
2618                                 locked_ref->node.ref_mod += ref->ref_mod;
2619                                 break;
2620                         default:
2621                                 WARN_ON(1);
2622                         }
2623                 }
2624                 spin_unlock(&locked_ref->lock);
2625
2626                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2627                                           must_insert_reserved);
2628
2629                 btrfs_free_delayed_extent_op(extent_op);
2630                 if (ret) {
2631                         locked_ref->processing = 0;
2632                         btrfs_delayed_ref_unlock(locked_ref);
2633                         btrfs_put_delayed_ref(ref);
2634                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2635                         return ret;
2636                 }
2637
2638                 /*
2639                  * If this node is a head, that means all the refs in this head
2640                  * have been dealt with, and we will pick the next head to deal
2641                  * with, so we must unlock the head and drop it from the cluster
2642                  * list before we release it.
2643                  */
2644                 if (btrfs_delayed_ref_is_head(ref)) {
2645                         if (locked_ref->is_data &&
2646                             locked_ref->total_ref_mod < 0) {
2647                                 spin_lock(&delayed_refs->lock);
2648                                 delayed_refs->pending_csums -= ref->num_bytes;
2649                                 spin_unlock(&delayed_refs->lock);
2650                         }
2651                         btrfs_delayed_ref_unlock(locked_ref);
2652                         locked_ref = NULL;
2653                 }
2654                 btrfs_put_delayed_ref(ref);
2655                 count++;
2656                 cond_resched();
2657         }
2658
2659         /*
2660          * We don't want to include ref heads since we can have empty ref heads
2661          * and those will drastically skew our runtime down since we just do
2662          * accounting, no actual extent tree updates.
2663          */
2664         if (actual_count > 0) {
2665                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2666                 u64 avg;
2667
2668                 /*
2669                  * We weigh the current average higher than our current runtime
2670                  * to avoid large swings in the average.
2671                  */
2672                 spin_lock(&delayed_refs->lock);
2673                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2674                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2675                 spin_unlock(&delayed_refs->lock);
2676         }
2677         return 0;
2678 }
2679
2680 #ifdef SCRAMBLE_DELAYED_REFS
2681 /*
2682  * Normally delayed refs get processed in ascending bytenr order. This
2683  * correlates in most cases to the order added. To expose dependencies on this
2684  * order, we start to process the tree in the middle instead of the beginning
2685  */
2686 static u64 find_middle(struct rb_root *root)
2687 {
2688         struct rb_node *n = root->rb_node;
2689         struct btrfs_delayed_ref_node *entry;
2690         int alt = 1;
2691         u64 middle;
2692         u64 first = 0, last = 0;
2693
2694         n = rb_first(root);
2695         if (n) {
2696                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2697                 first = entry->bytenr;
2698         }
2699         n = rb_last(root);
2700         if (n) {
2701                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2702                 last = entry->bytenr;
2703         }
2704         n = root->rb_node;
2705
2706         while (n) {
2707                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2708                 WARN_ON(!entry->in_tree);
2709
2710                 middle = entry->bytenr;
2711
2712                 if (alt)
2713                         n = n->rb_left;
2714                 else
2715                         n = n->rb_right;
2716
2717                 alt = 1 - alt;
2718         }
2719         return middle;
2720 }
2721 #endif
2722
2723 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2724 {
2725         u64 num_bytes;
2726
2727         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2728                              sizeof(struct btrfs_extent_inline_ref));
2729         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2730                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2731
2732         /*
2733          * We don't ever fill up leaves all the way so multiply by 2 just to be
2734          * closer to what we're really going to want to ouse.
2735          */
2736         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2737 }
2738
2739 /*
2740  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2741  * would require to store the csums for that many bytes.
2742  */
2743 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2744 {
2745         u64 csum_size;
2746         u64 num_csums_per_leaf;
2747         u64 num_csums;
2748
2749         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2750         num_csums_per_leaf = div64_u64(csum_size,
2751                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2752         num_csums = div64_u64(csum_bytes, root->sectorsize);
2753         num_csums += num_csums_per_leaf - 1;
2754         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2755         return num_csums;
2756 }
2757
2758 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2759                                        struct btrfs_root *root)
2760 {
2761         struct btrfs_block_rsv *global_rsv;
2762         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2763         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2764         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2765         u64 num_bytes, num_dirty_bgs_bytes;
2766         int ret = 0;
2767
2768         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2769         num_heads = heads_to_leaves(root, num_heads);
2770         if (num_heads > 1)
2771                 num_bytes += (num_heads - 1) * root->nodesize;
2772         num_bytes <<= 1;
2773         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2774         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2775                                                              num_dirty_bgs);
2776         global_rsv = &root->fs_info->global_block_rsv;
2777
2778         /*
2779          * If we can't allocate any more chunks lets make sure we have _lots_ of
2780          * wiggle room since running delayed refs can create more delayed refs.
2781          */
2782         if (global_rsv->space_info->full) {
2783                 num_dirty_bgs_bytes <<= 1;
2784                 num_bytes <<= 1;
2785         }
2786
2787         spin_lock(&global_rsv->lock);
2788         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2789                 ret = 1;
2790         spin_unlock(&global_rsv->lock);
2791         return ret;
2792 }
2793
2794 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2795                                        struct btrfs_root *root)
2796 {
2797         struct btrfs_fs_info *fs_info = root->fs_info;
2798         u64 num_entries =
2799                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2800         u64 avg_runtime;
2801         u64 val;
2802
2803         smp_mb();
2804         avg_runtime = fs_info->avg_delayed_ref_runtime;
2805         val = num_entries * avg_runtime;
2806         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2807                 return 1;
2808         if (val >= NSEC_PER_SEC / 2)
2809                 return 2;
2810
2811         return btrfs_check_space_for_delayed_refs(trans, root);
2812 }
2813
2814 struct async_delayed_refs {
2815         struct btrfs_root *root;
2816         int count;
2817         int error;
2818         int sync;
2819         struct completion wait;
2820         struct btrfs_work work;
2821 };
2822
2823 static void delayed_ref_async_start(struct btrfs_work *work)
2824 {
2825         struct async_delayed_refs *async;
2826         struct btrfs_trans_handle *trans;
2827         int ret;
2828
2829         async = container_of(work, struct async_delayed_refs, work);
2830
2831         trans = btrfs_join_transaction(async->root);
2832         if (IS_ERR(trans)) {
2833                 async->error = PTR_ERR(trans);
2834                 goto done;
2835         }
2836
2837         /*
2838          * trans->sync means that when we call end_transaciton, we won't
2839          * wait on delayed refs
2840          */
2841         trans->sync = true;
2842         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2843         if (ret)
2844                 async->error = ret;
2845
2846         ret = btrfs_end_transaction(trans, async->root);
2847         if (ret && !async->error)
2848                 async->error = ret;
2849 done:
2850         if (async->sync)
2851                 complete(&async->wait);
2852         else
2853                 kfree(async);
2854 }
2855
2856 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2857                                  unsigned long count, int wait)
2858 {
2859         struct async_delayed_refs *async;
2860         int ret;
2861
2862         async = kmalloc(sizeof(*async), GFP_NOFS);
2863         if (!async)
2864                 return -ENOMEM;
2865
2866         async->root = root->fs_info->tree_root;
2867         async->count = count;
2868         async->error = 0;
2869         if (wait)
2870                 async->sync = 1;
2871         else
2872                 async->sync = 0;
2873         init_completion(&async->wait);
2874
2875         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2876                         delayed_ref_async_start, NULL, NULL);
2877
2878         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2879
2880         if (wait) {
2881                 wait_for_completion(&async->wait);
2882                 ret = async->error;
2883                 kfree(async);
2884                 return ret;
2885         }
2886         return 0;
2887 }
2888
2889 /*
2890  * this starts processing the delayed reference count updates and
2891  * extent insertions we have queued up so far.  count can be
2892  * 0, which means to process everything in the tree at the start
2893  * of the run (but not newly added entries), or it can be some target
2894  * number you'd like to process.
2895  *
2896  * Returns 0 on success or if called with an aborted transaction
2897  * Returns <0 on error and aborts the transaction
2898  */
2899 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2900                            struct btrfs_root *root, unsigned long count)
2901 {
2902         struct rb_node *node;
2903         struct btrfs_delayed_ref_root *delayed_refs;
2904         struct btrfs_delayed_ref_head *head;
2905         int ret;
2906         int run_all = count == (unsigned long)-1;
2907         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2908
2909         /* We'll clean this up in btrfs_cleanup_transaction */
2910         if (trans->aborted)
2911                 return 0;
2912
2913         if (root == root->fs_info->extent_root)
2914                 root = root->fs_info->tree_root;
2915
2916         delayed_refs = &trans->transaction->delayed_refs;
2917         if (count == 0)
2918                 count = atomic_read(&delayed_refs->num_entries) * 2;
2919
2920 again:
2921 #ifdef SCRAMBLE_DELAYED_REFS
2922         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2923 #endif
2924         trans->can_flush_pending_bgs = false;
2925         ret = __btrfs_run_delayed_refs(trans, root, count);
2926         if (ret < 0) {
2927                 btrfs_abort_transaction(trans, root, ret);
2928                 return ret;
2929         }
2930
2931         if (run_all) {
2932                 if (!list_empty(&trans->new_bgs))
2933                         btrfs_create_pending_block_groups(trans, root);
2934
2935                 spin_lock(&delayed_refs->lock);
2936                 node = rb_first(&delayed_refs->href_root);
2937                 if (!node) {
2938                         spin_unlock(&delayed_refs->lock);
2939                         goto out;
2940                 }
2941                 count = (unsigned long)-1;
2942
2943                 while (node) {
2944                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2945                                         href_node);
2946                         if (btrfs_delayed_ref_is_head(&head->node)) {
2947                                 struct btrfs_delayed_ref_node *ref;
2948
2949                                 ref = &head->node;
2950                                 atomic_inc(&ref->refs);
2951
2952                                 spin_unlock(&delayed_refs->lock);
2953                                 /*
2954                                  * Mutex was contended, block until it's
2955                                  * released and try again
2956                                  */
2957                                 mutex_lock(&head->mutex);
2958                                 mutex_unlock(&head->mutex);
2959
2960                                 btrfs_put_delayed_ref(ref);
2961                                 cond_resched();
2962                                 goto again;
2963                         } else {
2964                                 WARN_ON(1);
2965                         }
2966                         node = rb_next(node);
2967                 }
2968                 spin_unlock(&delayed_refs->lock);
2969                 cond_resched();
2970                 goto again;
2971         }
2972 out:
2973         assert_qgroups_uptodate(trans);
2974         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2975         return 0;
2976 }
2977
2978 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2979                                 struct btrfs_root *root,
2980                                 u64 bytenr, u64 num_bytes, u64 flags,
2981                                 int level, int is_data)
2982 {
2983         struct btrfs_delayed_extent_op *extent_op;
2984         int ret;
2985
2986         extent_op = btrfs_alloc_delayed_extent_op();
2987         if (!extent_op)
2988                 return -ENOMEM;
2989
2990         extent_op->flags_to_set = flags;
2991         extent_op->update_flags = 1;
2992         extent_op->update_key = 0;
2993         extent_op->is_data = is_data ? 1 : 0;
2994         extent_op->level = level;
2995
2996         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2997                                           num_bytes, extent_op);
2998         if (ret)
2999                 btrfs_free_delayed_extent_op(extent_op);
3000         return ret;
3001 }
3002
3003 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3004                                       struct btrfs_root *root,
3005                                       struct btrfs_path *path,
3006                                       u64 objectid, u64 offset, u64 bytenr)
3007 {
3008         struct btrfs_delayed_ref_head *head;
3009         struct btrfs_delayed_ref_node *ref;
3010         struct btrfs_delayed_data_ref *data_ref;
3011         struct btrfs_delayed_ref_root *delayed_refs;
3012         int ret = 0;
3013
3014         delayed_refs = &trans->transaction->delayed_refs;
3015         spin_lock(&delayed_refs->lock);
3016         head = btrfs_find_delayed_ref_head(trans, bytenr);
3017         if (!head) {
3018                 spin_unlock(&delayed_refs->lock);
3019                 return 0;
3020         }
3021
3022         if (!mutex_trylock(&head->mutex)) {
3023                 atomic_inc(&head->node.refs);
3024                 spin_unlock(&delayed_refs->lock);
3025
3026                 btrfs_release_path(path);
3027
3028                 /*
3029                  * Mutex was contended, block until it's released and let
3030                  * caller try again
3031                  */
3032                 mutex_lock(&head->mutex);
3033                 mutex_unlock(&head->mutex);
3034                 btrfs_put_delayed_ref(&head->node);
3035                 return -EAGAIN;
3036         }
3037         spin_unlock(&delayed_refs->lock);
3038
3039         spin_lock(&head->lock);
3040         list_for_each_entry(ref, &head->ref_list, list) {
3041                 /* If it's a shared ref we know a cross reference exists */
3042                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3043                         ret = 1;
3044                         break;
3045                 }
3046
3047                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3048
3049                 /*
3050                  * If our ref doesn't match the one we're currently looking at
3051                  * then we have a cross reference.
3052                  */
3053                 if (data_ref->root != root->root_key.objectid ||
3054                     data_ref->objectid != objectid ||
3055                     data_ref->offset != offset) {
3056                         ret = 1;
3057                         break;
3058                 }
3059         }
3060         spin_unlock(&head->lock);
3061         mutex_unlock(&head->mutex);
3062         return ret;
3063 }
3064
3065 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3066                                         struct btrfs_root *root,
3067                                         struct btrfs_path *path,
3068                                         u64 objectid, u64 offset, u64 bytenr)
3069 {
3070         struct btrfs_root *extent_root = root->fs_info->extent_root;
3071         struct extent_buffer *leaf;
3072         struct btrfs_extent_data_ref *ref;
3073         struct btrfs_extent_inline_ref *iref;
3074         struct btrfs_extent_item *ei;
3075         struct btrfs_key key;
3076         u32 item_size;
3077         int ret;
3078
3079         key.objectid = bytenr;
3080         key.offset = (u64)-1;
3081         key.type = BTRFS_EXTENT_ITEM_KEY;
3082
3083         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3084         if (ret < 0)
3085                 goto out;
3086         BUG_ON(ret == 0); /* Corruption */
3087
3088         ret = -ENOENT;
3089         if (path->slots[0] == 0)
3090                 goto out;
3091
3092         path->slots[0]--;
3093         leaf = path->nodes[0];
3094         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3095
3096         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3097                 goto out;
3098
3099         ret = 1;
3100         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3101 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3102         if (item_size < sizeof(*ei)) {
3103                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3104                 goto out;
3105         }
3106 #endif
3107         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3108
3109         if (item_size != sizeof(*ei) +
3110             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3111                 goto out;
3112
3113         if (btrfs_extent_generation(leaf, ei) <=
3114             btrfs_root_last_snapshot(&root->root_item))
3115                 goto out;
3116
3117         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3118         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3119             BTRFS_EXTENT_DATA_REF_KEY)
3120                 goto out;
3121
3122         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3123         if (btrfs_extent_refs(leaf, ei) !=
3124             btrfs_extent_data_ref_count(leaf, ref) ||
3125             btrfs_extent_data_ref_root(leaf, ref) !=
3126             root->root_key.objectid ||
3127             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3128             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3129                 goto out;
3130
3131         ret = 0;
3132 out:
3133         return ret;
3134 }
3135
3136 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3137                           struct btrfs_root *root,
3138                           u64 objectid, u64 offset, u64 bytenr)
3139 {
3140         struct btrfs_path *path;
3141         int ret;
3142         int ret2;
3143
3144         path = btrfs_alloc_path();
3145         if (!path)
3146                 return -ENOENT;
3147
3148         do {
3149                 ret = check_committed_ref(trans, root, path, objectid,
3150                                           offset, bytenr);
3151                 if (ret && ret != -ENOENT)
3152                         goto out;
3153
3154                 ret2 = check_delayed_ref(trans, root, path, objectid,
3155                                          offset, bytenr);
3156         } while (ret2 == -EAGAIN);
3157
3158         if (ret2 && ret2 != -ENOENT) {
3159                 ret = ret2;
3160                 goto out;
3161         }
3162
3163         if (ret != -ENOENT || ret2 != -ENOENT)
3164                 ret = 0;
3165 out:
3166         btrfs_free_path(path);
3167         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3168                 WARN_ON(ret > 0);
3169         return ret;
3170 }
3171
3172 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3173                            struct btrfs_root *root,
3174                            struct extent_buffer *buf,
3175                            int full_backref, int inc)
3176 {
3177         u64 bytenr;
3178         u64 num_bytes;
3179         u64 parent;
3180         u64 ref_root;
3181         u32 nritems;
3182         struct btrfs_key key;
3183         struct btrfs_file_extent_item *fi;
3184         int i;
3185         int level;
3186         int ret = 0;
3187         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3188                             u64, u64, u64, u64, u64, u64);
3189
3190
3191         if (btrfs_test_is_dummy_root(root))
3192                 return 0;
3193
3194         ref_root = btrfs_header_owner(buf);
3195         nritems = btrfs_header_nritems(buf);
3196         level = btrfs_header_level(buf);
3197
3198         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3199                 return 0;
3200
3201         if (inc)
3202                 process_func = btrfs_inc_extent_ref;
3203         else
3204                 process_func = btrfs_free_extent;
3205
3206         if (full_backref)
3207                 parent = buf->start;
3208         else
3209                 parent = 0;
3210
3211         for (i = 0; i < nritems; i++) {
3212                 if (level == 0) {
3213                         btrfs_item_key_to_cpu(buf, &key, i);
3214                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3215                                 continue;
3216                         fi = btrfs_item_ptr(buf, i,
3217                                             struct btrfs_file_extent_item);
3218                         if (btrfs_file_extent_type(buf, fi) ==
3219                             BTRFS_FILE_EXTENT_INLINE)
3220                                 continue;
3221                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3222                         if (bytenr == 0)
3223                                 continue;
3224
3225                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3226                         key.offset -= btrfs_file_extent_offset(buf, fi);
3227                         ret = process_func(trans, root, bytenr, num_bytes,
3228                                            parent, ref_root, key.objectid,
3229                                            key.offset);
3230                         if (ret)
3231                                 goto fail;
3232                 } else {
3233                         bytenr = btrfs_node_blockptr(buf, i);
3234                         num_bytes = root->nodesize;
3235                         ret = process_func(trans, root, bytenr, num_bytes,
3236                                            parent, ref_root, level - 1, 0);
3237                         if (ret)
3238                                 goto fail;
3239                 }
3240         }
3241         return 0;
3242 fail:
3243         return ret;
3244 }
3245
3246 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3247                   struct extent_buffer *buf, int full_backref)
3248 {
3249         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3250 }
3251
3252 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3253                   struct extent_buffer *buf, int full_backref)
3254 {
3255         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3256 }
3257
3258 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3259                                  struct btrfs_root *root,
3260                                  struct btrfs_path *path,
3261                                  struct btrfs_block_group_cache *cache)
3262 {
3263         int ret;
3264         struct btrfs_root *extent_root = root->fs_info->extent_root;
3265         unsigned long bi;
3266         struct extent_buffer *leaf;
3267
3268         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3269         if (ret) {
3270                 if (ret > 0)
3271                         ret = -ENOENT;
3272                 goto fail;
3273         }
3274
3275         leaf = path->nodes[0];
3276         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3277         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3278         btrfs_mark_buffer_dirty(leaf);
3279 fail:
3280         btrfs_release_path(path);
3281         return ret;
3282
3283 }
3284
3285 static struct btrfs_block_group_cache *
3286 next_block_group(struct btrfs_root *root,
3287                  struct btrfs_block_group_cache *cache)
3288 {
3289         struct rb_node *node;
3290
3291         spin_lock(&root->fs_info->block_group_cache_lock);
3292
3293         /* If our block group was removed, we need a full search. */
3294         if (RB_EMPTY_NODE(&cache->cache_node)) {
3295                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3296
3297                 spin_unlock(&root->fs_info->block_group_cache_lock);
3298                 btrfs_put_block_group(cache);
3299                 cache = btrfs_lookup_first_block_group(root->fs_info,
3300                                                        next_bytenr);
3301                 return cache;
3302         }
3303         node = rb_next(&cache->cache_node);
3304         btrfs_put_block_group(cache);
3305         if (node) {
3306                 cache = rb_entry(node, struct btrfs_block_group_cache,
3307                                  cache_node);
3308                 btrfs_get_block_group(cache);
3309         } else
3310                 cache = NULL;
3311         spin_unlock(&root->fs_info->block_group_cache_lock);
3312         return cache;
3313 }
3314
3315 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3316                             struct btrfs_trans_handle *trans,
3317                             struct btrfs_path *path)
3318 {
3319         struct btrfs_root *root = block_group->fs_info->tree_root;
3320         struct inode *inode = NULL;
3321         u64 alloc_hint = 0;
3322         int dcs = BTRFS_DC_ERROR;
3323         u64 num_pages = 0;
3324         int retries = 0;
3325         int ret = 0;
3326
3327         /*
3328          * If this block group is smaller than 100 megs don't bother caching the
3329          * block group.
3330          */
3331         if (block_group->key.offset < (100 * SZ_1M)) {
3332                 spin_lock(&block_group->lock);
3333                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3334                 spin_unlock(&block_group->lock);
3335                 return 0;
3336         }
3337
3338         if (trans->aborted)
3339                 return 0;
3340 again:
3341         inode = lookup_free_space_inode(root, block_group, path);
3342         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3343                 ret = PTR_ERR(inode);
3344                 btrfs_release_path(path);
3345                 goto out;
3346         }
3347
3348         if (IS_ERR(inode)) {
3349                 BUG_ON(retries);
3350                 retries++;
3351
3352                 if (block_group->ro)
3353                         goto out_free;
3354
3355                 ret = create_free_space_inode(root, trans, block_group, path);
3356                 if (ret)
3357                         goto out_free;
3358                 goto again;
3359         }
3360
3361         /* We've already setup this transaction, go ahead and exit */
3362         if (block_group->cache_generation == trans->transid &&
3363             i_size_read(inode)) {
3364                 dcs = BTRFS_DC_SETUP;
3365                 goto out_put;
3366         }
3367
3368         /*
3369          * We want to set the generation to 0, that way if anything goes wrong
3370          * from here on out we know not to trust this cache when we load up next
3371          * time.
3372          */
3373         BTRFS_I(inode)->generation = 0;
3374         ret = btrfs_update_inode(trans, root, inode);
3375         if (ret) {
3376                 /*
3377                  * So theoretically we could recover from this, simply set the
3378                  * super cache generation to 0 so we know to invalidate the
3379                  * cache, but then we'd have to keep track of the block groups
3380                  * that fail this way so we know we _have_ to reset this cache
3381                  * before the next commit or risk reading stale cache.  So to
3382                  * limit our exposure to horrible edge cases lets just abort the
3383                  * transaction, this only happens in really bad situations
3384                  * anyway.
3385                  */
3386                 btrfs_abort_transaction(trans, root, ret);
3387                 goto out_put;
3388         }
3389         WARN_ON(ret);
3390
3391         if (i_size_read(inode) > 0) {
3392                 ret = btrfs_check_trunc_cache_free_space(root,
3393                                         &root->fs_info->global_block_rsv);
3394                 if (ret)
3395                         goto out_put;
3396
3397                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3398                 if (ret)
3399                         goto out_put;
3400         }
3401
3402         spin_lock(&block_group->lock);
3403         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3404             !btrfs_test_opt(root, SPACE_CACHE)) {
3405                 /*
3406                  * don't bother trying to write stuff out _if_
3407                  * a) we're not cached,
3408                  * b) we're with nospace_cache mount option.
3409                  */
3410                 dcs = BTRFS_DC_WRITTEN;
3411                 spin_unlock(&block_group->lock);
3412                 goto out_put;
3413         }
3414         spin_unlock(&block_group->lock);
3415
3416         /*
3417          * We hit an ENOSPC when setting up the cache in this transaction, just
3418          * skip doing the setup, we've already cleared the cache so we're safe.
3419          */
3420         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3421                 ret = -ENOSPC;
3422                 goto out_put;
3423         }
3424
3425         /*
3426          * Try to preallocate enough space based on how big the block group is.
3427          * Keep in mind this has to include any pinned space which could end up
3428          * taking up quite a bit since it's not folded into the other space
3429          * cache.
3430          */
3431         num_pages = div_u64(block_group->key.offset, SZ_256M);
3432         if (!num_pages)
3433                 num_pages = 1;
3434
3435         num_pages *= 16;
3436         num_pages *= PAGE_CACHE_SIZE;
3437
3438         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3439         if (ret)
3440                 goto out_put;
3441
3442         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3443                                               num_pages, num_pages,
3444                                               &alloc_hint);
3445         /*
3446          * Our cache requires contiguous chunks so that we don't modify a bunch
3447          * of metadata or split extents when writing the cache out, which means
3448          * we can enospc if we are heavily fragmented in addition to just normal
3449          * out of space conditions.  So if we hit this just skip setting up any
3450          * other block groups for this transaction, maybe we'll unpin enough
3451          * space the next time around.
3452          */
3453         if (!ret)
3454                 dcs = BTRFS_DC_SETUP;
3455         else if (ret == -ENOSPC)
3456                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3457         btrfs_free_reserved_data_space(inode, 0, num_pages);
3458
3459 out_put:
3460         iput(inode);
3461 out_free:
3462         btrfs_release_path(path);
3463 out:
3464         spin_lock(&block_group->lock);
3465         if (!ret && dcs == BTRFS_DC_SETUP)
3466                 block_group->cache_generation = trans->transid;
3467         block_group->disk_cache_state = dcs;
3468         spin_unlock(&block_group->lock);
3469
3470         return ret;
3471 }
3472
3473 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3474                             struct btrfs_root *root)
3475 {
3476         struct btrfs_block_group_cache *cache, *tmp;
3477         struct btrfs_transaction *cur_trans = trans->transaction;
3478         struct btrfs_path *path;
3479
3480         if (list_empty(&cur_trans->dirty_bgs) ||
3481             !btrfs_test_opt(root, SPACE_CACHE))
3482                 return 0;
3483
3484         path = btrfs_alloc_path();
3485         if (!path)
3486                 return -ENOMEM;
3487
3488         /* Could add new block groups, use _safe just in case */
3489         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3490                                  dirty_list) {
3491                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3492                         cache_save_setup(cache, trans, path);
3493         }
3494
3495         btrfs_free_path(path);
3496         return 0;
3497 }
3498
3499 /*
3500  * transaction commit does final block group cache writeback during a
3501  * critical section where nothing is allowed to change the FS.  This is
3502  * required in order for the cache to actually match the block group,
3503  * but can introduce a lot of latency into the commit.
3504  *
3505  * So, btrfs_start_dirty_block_groups is here to kick off block group
3506  * cache IO.  There's a chance we'll have to redo some of it if the
3507  * block group changes again during the commit, but it greatly reduces
3508  * the commit latency by getting rid of the easy block groups while
3509  * we're still allowing others to join the commit.
3510  */
3511 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3512                                    struct btrfs_root *root)
3513 {
3514         struct btrfs_block_group_cache *cache;
3515         struct btrfs_transaction *cur_trans = trans->transaction;
3516         int ret = 0;
3517         int should_put;
3518         struct btrfs_path *path = NULL;
3519         LIST_HEAD(dirty);
3520         struct list_head *io = &cur_trans->io_bgs;
3521         int num_started = 0;
3522         int loops = 0;
3523
3524         spin_lock(&cur_trans->dirty_bgs_lock);
3525         if (list_empty(&cur_trans->dirty_bgs)) {
3526                 spin_unlock(&cur_trans->dirty_bgs_lock);
3527                 return 0;
3528         }
3529         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3530         spin_unlock(&cur_trans->dirty_bgs_lock);
3531
3532 again:
3533         /*
3534          * make sure all the block groups on our dirty list actually
3535          * exist
3536          */
3537         btrfs_create_pending_block_groups(trans, root);
3538
3539         if (!path) {
3540                 path = btrfs_alloc_path();
3541                 if (!path)
3542                         return -ENOMEM;
3543         }
3544
3545         /*
3546          * cache_write_mutex is here only to save us from balance or automatic
3547          * removal of empty block groups deleting this block group while we are
3548          * writing out the cache
3549          */
3550         mutex_lock(&trans->transaction->cache_write_mutex);
3551         while (!list_empty(&dirty)) {
3552                 cache = list_first_entry(&dirty,
3553                                          struct btrfs_block_group_cache,
3554                                          dirty_list);
3555                 /*
3556                  * this can happen if something re-dirties a block
3557                  * group that is already under IO.  Just wait for it to
3558                  * finish and then do it all again
3559                  */
3560                 if (!list_empty(&cache->io_list)) {
3561                         list_del_init(&cache->io_list);
3562                         btrfs_wait_cache_io(root, trans, cache,
3563                                             &cache->io_ctl, path,
3564                                             cache->key.objectid);
3565                         btrfs_put_block_group(cache);
3566                 }
3567
3568
3569                 /*
3570                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3571                  * if it should update the cache_state.  Don't delete
3572                  * until after we wait.
3573                  *
3574                  * Since we're not running in the commit critical section
3575                  * we need the dirty_bgs_lock to protect from update_block_group
3576                  */
3577                 spin_lock(&cur_trans->dirty_bgs_lock);
3578                 list_del_init(&cache->dirty_list);
3579                 spin_unlock(&cur_trans->dirty_bgs_lock);
3580
3581                 should_put = 1;
3582
3583                 cache_save_setup(cache, trans, path);
3584
3585                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3586                         cache->io_ctl.inode = NULL;
3587                         ret = btrfs_write_out_cache(root, trans, cache, path);
3588                         if (ret == 0 && cache->io_ctl.inode) {
3589                                 num_started++;
3590                                 should_put = 0;
3591
3592                                 /*
3593                                  * the cache_write_mutex is protecting
3594                                  * the io_list
3595                                  */
3596                                 list_add_tail(&cache->io_list, io);
3597                         } else {
3598                                 /*
3599                                  * if we failed to write the cache, the
3600                                  * generation will be bad and life goes on
3601                                  */
3602                                 ret = 0;
3603                         }
3604                 }
3605                 if (!ret) {
3606                         ret = write_one_cache_group(trans, root, path, cache);
3607                         /*
3608                          * Our block group might still be attached to the list
3609                          * of new block groups in the transaction handle of some
3610                          * other task (struct btrfs_trans_handle->new_bgs). This
3611                          * means its block group item isn't yet in the extent
3612                          * tree. If this happens ignore the error, as we will
3613                          * try again later in the critical section of the
3614                          * transaction commit.
3615                          */
3616                         if (ret == -ENOENT) {
3617                                 ret = 0;
3618                                 spin_lock(&cur_trans->dirty_bgs_lock);
3619                                 if (list_empty(&cache->dirty_list)) {
3620                                         list_add_tail(&cache->dirty_list,
3621                                                       &cur_trans->dirty_bgs);
3622                                         btrfs_get_block_group(cache);
3623                                 }
3624                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3625                         } else if (ret) {
3626                                 btrfs_abort_transaction(trans, root, ret);
3627                         }
3628                 }
3629
3630                 /* if its not on the io list, we need to put the block group */
3631                 if (should_put)
3632                         btrfs_put_block_group(cache);
3633
3634                 if (ret)
3635                         break;
3636
3637                 /*
3638                  * Avoid blocking other tasks for too long. It might even save
3639                  * us from writing caches for block groups that are going to be
3640                  * removed.
3641                  */
3642                 mutex_unlock(&trans->transaction->cache_write_mutex);
3643                 mutex_lock(&trans->transaction->cache_write_mutex);
3644         }
3645         mutex_unlock(&trans->transaction->cache_write_mutex);
3646
3647         /*
3648          * go through delayed refs for all the stuff we've just kicked off
3649          * and then loop back (just once)
3650          */
3651         ret = btrfs_run_delayed_refs(trans, root, 0);
3652         if (!ret && loops == 0) {
3653                 loops++;
3654                 spin_lock(&cur_trans->dirty_bgs_lock);
3655                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3656                 /*
3657                  * dirty_bgs_lock protects us from concurrent block group
3658                  * deletes too (not just cache_write_mutex).
3659                  */
3660                 if (!list_empty(&dirty)) {
3661                         spin_unlock(&cur_trans->dirty_bgs_lock);
3662                         goto again;
3663                 }
3664                 spin_unlock(&cur_trans->dirty_bgs_lock);
3665         }
3666
3667         btrfs_free_path(path);
3668         return ret;
3669 }
3670
3671 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3672                                    struct btrfs_root *root)
3673 {
3674         struct btrfs_block_group_cache *cache;
3675         struct btrfs_transaction *cur_trans = trans->transaction;
3676         int ret = 0;
3677         int should_put;
3678         struct btrfs_path *path;
3679         struct list_head *io = &cur_trans->io_bgs;
3680         int num_started = 0;
3681
3682         path = btrfs_alloc_path();
3683         if (!path)
3684                 return -ENOMEM;
3685
3686         /*
3687          * We don't need the lock here since we are protected by the transaction
3688          * commit.  We want to do the cache_save_setup first and then run the
3689          * delayed refs to make sure we have the best chance at doing this all
3690          * in one shot.
3691          */
3692         while (!list_empty(&cur_trans->dirty_bgs)) {
3693                 cache = list_first_entry(&cur_trans->dirty_bgs,
3694                                          struct btrfs_block_group_cache,
3695                                          dirty_list);
3696
3697                 /*
3698                  * this can happen if cache_save_setup re-dirties a block
3699                  * group that is already under IO.  Just wait for it to
3700                  * finish and then do it all again
3701                  */
3702                 if (!list_empty(&cache->io_list)) {
3703                         list_del_init(&cache->io_list);
3704                         btrfs_wait_cache_io(root, trans, cache,
3705                                             &cache->io_ctl, path,
3706                                             cache->key.objectid);
3707                         btrfs_put_block_group(cache);
3708                 }
3709
3710                 /*
3711                  * don't remove from the dirty list until after we've waited
3712                  * on any pending IO
3713                  */
3714                 list_del_init(&cache->dirty_list);
3715                 should_put = 1;
3716
3717                 cache_save_setup(cache, trans, path);
3718
3719                 if (!ret)
3720                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3721
3722                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3723                         cache->io_ctl.inode = NULL;
3724                         ret = btrfs_write_out_cache(root, trans, cache, path);
3725                         if (ret == 0 && cache->io_ctl.inode) {
3726                                 num_started++;
3727                                 should_put = 0;
3728                                 list_add_tail(&cache->io_list, io);
3729                         } else {
3730                                 /*
3731                                  * if we failed to write the cache, the
3732                                  * generation will be bad and life goes on
3733                                  */
3734                                 ret = 0;
3735                         }
3736                 }
3737                 if (!ret) {
3738                         ret = write_one_cache_group(trans, root, path, cache);
3739                         if (ret)
3740                                 btrfs_abort_transaction(trans, root, ret);
3741                 }
3742
3743                 /* if its not on the io list, we need to put the block group */
3744                 if (should_put)
3745                         btrfs_put_block_group(cache);
3746         }
3747
3748         while (!list_empty(io)) {
3749                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3750                                          io_list);
3751                 list_del_init(&cache->io_list);
3752                 btrfs_wait_cache_io(root, trans, cache,
3753                                     &cache->io_ctl, path, cache->key.objectid);
3754                 btrfs_put_block_group(cache);
3755         }
3756
3757         btrfs_free_path(path);
3758         return ret;
3759 }
3760
3761 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3762 {
3763         struct btrfs_block_group_cache *block_group;
3764         int readonly = 0;
3765
3766         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3767         if (!block_group || block_group->ro)
3768                 readonly = 1;
3769         if (block_group)
3770                 btrfs_put_block_group(block_group);
3771         return readonly;
3772 }
3773
3774 static const char *alloc_name(u64 flags)
3775 {
3776         switch (flags) {
3777         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3778                 return "mixed";
3779         case BTRFS_BLOCK_GROUP_METADATA:
3780                 return "metadata";
3781         case BTRFS_BLOCK_GROUP_DATA:
3782                 return "data";
3783         case BTRFS_BLOCK_GROUP_SYSTEM:
3784                 return "system";
3785         default:
3786                 WARN_ON(1);
3787                 return "invalid-combination";
3788         };
3789 }
3790
3791 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3792                              u64 total_bytes, u64 bytes_used,
3793                              struct btrfs_space_info **space_info)
3794 {
3795         struct btrfs_space_info *found;
3796         int i;
3797         int factor;
3798         int ret;
3799
3800         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3801                      BTRFS_BLOCK_GROUP_RAID10))
3802                 factor = 2;
3803         else
3804                 factor = 1;
3805
3806         found = __find_space_info(info, flags);
3807         if (found) {
3808                 spin_lock(&found->lock);
3809                 found->total_bytes += total_bytes;
3810                 found->disk_total += total_bytes * factor;
3811                 found->bytes_used += bytes_used;
3812                 found->disk_used += bytes_used * factor;
3813                 if (total_bytes > 0)
3814                         found->full = 0;
3815                 spin_unlock(&found->lock);
3816                 *space_info = found;
3817                 return 0;
3818         }
3819         found = kzalloc(sizeof(*found), GFP_NOFS);
3820         if (!found)
3821                 return -ENOMEM;
3822
3823         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3824         if (ret) {
3825                 kfree(found);
3826                 return ret;
3827         }
3828
3829         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3830                 INIT_LIST_HEAD(&found->block_groups[i]);
3831         init_rwsem(&found->groups_sem);
3832         spin_lock_init(&found->lock);
3833         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3834         found->total_bytes = total_bytes;
3835         found->disk_total = total_bytes * factor;
3836         found->bytes_used = bytes_used;
3837         found->disk_used = bytes_used * factor;
3838         found->bytes_pinned = 0;
3839         found->bytes_reserved = 0;
3840         found->bytes_readonly = 0;
3841         found->bytes_may_use = 0;
3842         found->full = 0;
3843         found->max_extent_size = 0;
3844         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3845         found->chunk_alloc = 0;
3846         found->flush = 0;
3847         init_waitqueue_head(&found->wait);
3848         INIT_LIST_HEAD(&found->ro_bgs);
3849
3850         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3851                                     info->space_info_kobj, "%s",
3852                                     alloc_name(found->flags));
3853         if (ret) {
3854                 kfree(found);
3855                 return ret;
3856         }
3857
3858         *space_info = found;
3859         list_add_rcu(&found->list, &info->space_info);
3860         if (flags & BTRFS_BLOCK_GROUP_DATA)
3861                 info->data_sinfo = found;
3862
3863         return ret;
3864 }
3865
3866 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3867 {
3868         u64 extra_flags = chunk_to_extended(flags) &
3869                                 BTRFS_EXTENDED_PROFILE_MASK;
3870
3871         write_seqlock(&fs_info->profiles_lock);
3872         if (flags & BTRFS_BLOCK_GROUP_DATA)
3873                 fs_info->avail_data_alloc_bits |= extra_flags;
3874         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3875                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3876         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3877                 fs_info->avail_system_alloc_bits |= extra_flags;
3878         write_sequnlock(&fs_info->profiles_lock);
3879 }
3880
3881 /*
3882  * returns target flags in extended format or 0 if restripe for this
3883  * chunk_type is not in progress
3884  *
3885  * should be called with either volume_mutex or balance_lock held
3886  */
3887 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3888 {
3889         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3890         u64 target = 0;
3891
3892         if (!bctl)
3893                 return 0;
3894
3895         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3896             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3897                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3898         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3899                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3900                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3901         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3902                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3903                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3904         }
3905
3906         return target;
3907 }
3908
3909 /*
3910  * @flags: available profiles in extended format (see ctree.h)
3911  *
3912  * Returns reduced profile in chunk format.  If profile changing is in
3913  * progress (either running or paused) picks the target profile (if it's
3914  * already available), otherwise falls back to plain reducing.
3915  */
3916 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3917 {
3918         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3919         u64 target;
3920         u64 raid_type;
3921         u64 allowed = 0;
3922
3923         /*
3924          * see if restripe for this chunk_type is in progress, if so
3925          * try to reduce to the target profile
3926          */
3927         spin_lock(&root->fs_info->balance_lock);
3928         target = get_restripe_target(root->fs_info, flags);
3929         if (target) {
3930                 /* pick target profile only if it's already available */
3931                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3932                         spin_unlock(&root->fs_info->balance_lock);
3933                         return extended_to_chunk(target);
3934                 }
3935         }
3936         spin_unlock(&root->fs_info->balance_lock);
3937
3938         /* First, mask out the RAID levels which aren't possible */
3939         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3940                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3941                         allowed |= btrfs_raid_group[raid_type];
3942         }
3943         allowed &= flags;
3944
3945         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3946                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3947         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3948                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3949         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3950                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3951         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3952                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3953         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3954                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3955
3956         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3957
3958         return extended_to_chunk(flags | allowed);
3959 }
3960
3961 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3962 {
3963         unsigned seq;
3964         u64 flags;
3965
3966         do {
3967                 flags = orig_flags;
3968                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3969
3970                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3971                         flags |= root->fs_info->avail_data_alloc_bits;
3972                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3973                         flags |= root->fs_info->avail_system_alloc_bits;
3974                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3975                         flags |= root->fs_info->avail_metadata_alloc_bits;
3976         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3977
3978         return btrfs_reduce_alloc_profile(root, flags);
3979 }
3980
3981 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3982 {
3983         u64 flags;
3984         u64 ret;
3985
3986         if (data)
3987                 flags = BTRFS_BLOCK_GROUP_DATA;
3988         else if (root == root->fs_info->chunk_root)
3989                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3990         else
3991                 flags = BTRFS_BLOCK_GROUP_METADATA;
3992
3993         ret = get_alloc_profile(root, flags);
3994         return ret;
3995 }
3996
3997 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
3998 {
3999         struct btrfs_space_info *data_sinfo;
4000         struct btrfs_root *root = BTRFS_I(inode)->root;
4001         struct btrfs_fs_info *fs_info = root->fs_info;
4002         u64 used;
4003         int ret = 0;
4004         int need_commit = 2;
4005         int have_pinned_space;
4006
4007         /* make sure bytes are sectorsize aligned */
4008         bytes = ALIGN(bytes, root->sectorsize);
4009
4010         if (btrfs_is_free_space_inode(inode)) {
4011                 need_commit = 0;
4012                 ASSERT(current->journal_info);
4013         }
4014
4015         data_sinfo = fs_info->data_sinfo;
4016         if (!data_sinfo)
4017                 goto alloc;
4018
4019 again:
4020         /* make sure we have enough space to handle the data first */
4021         spin_lock(&data_sinfo->lock);
4022         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4023                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4024                 data_sinfo->bytes_may_use;
4025
4026         if (used + bytes > data_sinfo->total_bytes) {
4027                 struct btrfs_trans_handle *trans;
4028
4029                 /*
4030                  * if we don't have enough free bytes in this space then we need
4031                  * to alloc a new chunk.
4032                  */
4033                 if (!data_sinfo->full) {
4034                         u64 alloc_target;
4035
4036                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4037                         spin_unlock(&data_sinfo->lock);
4038 alloc:
4039                         alloc_target = btrfs_get_alloc_profile(root, 1);
4040                         /*
4041                          * It is ugly that we don't call nolock join
4042                          * transaction for the free space inode case here.
4043                          * But it is safe because we only do the data space
4044                          * reservation for the free space cache in the
4045                          * transaction context, the common join transaction
4046                          * just increase the counter of the current transaction
4047                          * handler, doesn't try to acquire the trans_lock of
4048                          * the fs.
4049                          */
4050                         trans = btrfs_join_transaction(root);
4051                         if (IS_ERR(trans))
4052                                 return PTR_ERR(trans);
4053
4054                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4055                                              alloc_target,
4056                                              CHUNK_ALLOC_NO_FORCE);
4057                         btrfs_end_transaction(trans, root);
4058                         if (ret < 0) {
4059                                 if (ret != -ENOSPC)
4060                                         return ret;
4061                                 else {
4062                                         have_pinned_space = 1;
4063                                         goto commit_trans;
4064                                 }
4065                         }
4066
4067                         if (!data_sinfo)
4068                                 data_sinfo = fs_info->data_sinfo;
4069
4070                         goto again;
4071                 }
4072
4073                 /*
4074                  * If we don't have enough pinned space to deal with this
4075                  * allocation, and no removed chunk in current transaction,
4076                  * don't bother committing the transaction.
4077                  */
4078                 have_pinned_space = percpu_counter_compare(
4079                         &data_sinfo->total_bytes_pinned,
4080                         used + bytes - data_sinfo->total_bytes);
4081                 spin_unlock(&data_sinfo->lock);
4082
4083                 /* commit the current transaction and try again */
4084 commit_trans:
4085                 if (need_commit &&
4086                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4087                         need_commit--;
4088
4089                         if (need_commit > 0)
4090                                 btrfs_wait_ordered_roots(fs_info, -1);
4091
4092                         trans = btrfs_join_transaction(root);
4093                         if (IS_ERR(trans))
4094                                 return PTR_ERR(trans);
4095                         if (have_pinned_space >= 0 ||
4096                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4097                                      &trans->transaction->flags) ||
4098                             need_commit > 0) {
4099                                 ret = btrfs_commit_transaction(trans, root);
4100                                 if (ret)
4101                                         return ret;
4102                                 /*
4103                                  * make sure that all running delayed iput are
4104                                  * done
4105                                  */
4106                                 down_write(&root->fs_info->delayed_iput_sem);
4107                                 up_write(&root->fs_info->delayed_iput_sem);
4108                                 goto again;
4109                         } else {
4110                                 btrfs_end_transaction(trans, root);
4111                         }
4112                 }
4113
4114                 trace_btrfs_space_reservation(root->fs_info,
4115                                               "space_info:enospc",
4116                                               data_sinfo->flags, bytes, 1);
4117                 return -ENOSPC;
4118         }
4119         data_sinfo->bytes_may_use += bytes;
4120         trace_btrfs_space_reservation(root->fs_info, "space_info",
4121                                       data_sinfo->flags, bytes, 1);
4122         spin_unlock(&data_sinfo->lock);
4123
4124         return ret;
4125 }
4126
4127 /*
4128  * New check_data_free_space() with ability for precious data reservation
4129  * Will replace old btrfs_check_data_free_space(), but for patch split,
4130  * add a new function first and then replace it.
4131  */
4132 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4133 {
4134         struct btrfs_root *root = BTRFS_I(inode)->root;
4135         int ret;
4136
4137         /* align the range */
4138         len = round_up(start + len, root->sectorsize) -
4139               round_down(start, root->sectorsize);
4140         start = round_down(start, root->sectorsize);
4141
4142         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4143         if (ret < 0)
4144                 return ret;
4145
4146         /*
4147          * Use new btrfs_qgroup_reserve_data to reserve precious data space
4148          *
4149          * TODO: Find a good method to avoid reserve data space for NOCOW
4150          * range, but don't impact performance on quota disable case.
4151          */
4152         ret = btrfs_qgroup_reserve_data(inode, start, len);
4153         return ret;
4154 }
4155
4156 /*
4157  * Called if we need to clear a data reservation for this inode
4158  * Normally in a error case.
4159  *
4160  * This one will *NOT* use accurate qgroup reserved space API, just for case
4161  * which we can't sleep and is sure it won't affect qgroup reserved space.
4162  * Like clear_bit_hook().
4163  */
4164 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4165                                             u64 len)
4166 {
4167         struct btrfs_root *root = BTRFS_I(inode)->root;
4168         struct btrfs_space_info *data_sinfo;
4169
4170         /* Make sure the range is aligned to sectorsize */
4171         len = round_up(start + len, root->sectorsize) -
4172               round_down(start, root->sectorsize);
4173         start = round_down(start, root->sectorsize);
4174
4175         data_sinfo = root->fs_info->data_sinfo;
4176         spin_lock(&data_sinfo->lock);
4177         if (WARN_ON(data_sinfo->bytes_may_use < len))
4178                 data_sinfo->bytes_may_use = 0;
4179         else
4180                 data_sinfo->bytes_may_use -= len;
4181         trace_btrfs_space_reservation(root->fs_info, "space_info",
4182                                       data_sinfo->flags, len, 0);
4183         spin_unlock(&data_sinfo->lock);
4184 }
4185
4186 /*
4187  * Called if we need to clear a data reservation for this inode
4188  * Normally in a error case.
4189  *
4190  * This one will handle the per-indoe data rsv map for accurate reserved
4191  * space framework.
4192  */
4193 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4194 {
4195         btrfs_free_reserved_data_space_noquota(inode, start, len);
4196         btrfs_qgroup_free_data(inode, start, len);
4197 }
4198
4199 static void force_metadata_allocation(struct btrfs_fs_info *info)
4200 {
4201         struct list_head *head = &info->space_info;
4202         struct btrfs_space_info *found;
4203
4204         rcu_read_lock();
4205         list_for_each_entry_rcu(found, head, list) {
4206                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4207                         found->force_alloc = CHUNK_ALLOC_FORCE;
4208         }
4209         rcu_read_unlock();
4210 }
4211
4212 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4213 {
4214         return (global->size << 1);
4215 }
4216
4217 static int should_alloc_chunk(struct btrfs_root *root,
4218                               struct btrfs_space_info *sinfo, int force)
4219 {
4220         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4221         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4222         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4223         u64 thresh;
4224
4225         if (force == CHUNK_ALLOC_FORCE)
4226                 return 1;
4227
4228         /*
4229          * We need to take into account the global rsv because for all intents
4230          * and purposes it's used space.  Don't worry about locking the
4231          * global_rsv, it doesn't change except when the transaction commits.
4232          */
4233         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4234                 num_allocated += calc_global_rsv_need_space(global_rsv);
4235
4236         /*
4237          * in limited mode, we want to have some free space up to
4238          * about 1% of the FS size.
4239          */
4240         if (force == CHUNK_ALLOC_LIMITED) {
4241                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4242                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4243
4244                 if (num_bytes - num_allocated < thresh)
4245                         return 1;
4246         }
4247
4248         if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4249                 return 0;
4250         return 1;
4251 }
4252
4253 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4254 {
4255         u64 num_dev;
4256
4257         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4258                     BTRFS_BLOCK_GROUP_RAID0 |
4259                     BTRFS_BLOCK_GROUP_RAID5 |
4260                     BTRFS_BLOCK_GROUP_RAID6))
4261                 num_dev = root->fs_info->fs_devices->rw_devices;
4262         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4263                 num_dev = 2;
4264         else
4265                 num_dev = 1;    /* DUP or single */
4266
4267         return num_dev;
4268 }
4269
4270 /*
4271  * If @is_allocation is true, reserve space in the system space info necessary
4272  * for allocating a chunk, otherwise if it's false, reserve space necessary for
4273  * removing a chunk.
4274  */
4275 void check_system_chunk(struct btrfs_trans_handle *trans,
4276                         struct btrfs_root *root,
4277                         u64 type)
4278 {
4279         struct btrfs_space_info *info;
4280         u64 left;
4281         u64 thresh;
4282         int ret = 0;
4283         u64 num_devs;
4284
4285         /*
4286          * Needed because we can end up allocating a system chunk and for an
4287          * atomic and race free space reservation in the chunk block reserve.
4288          */
4289         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4290
4291         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4292         spin_lock(&info->lock);
4293         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4294                 info->bytes_reserved - info->bytes_readonly -
4295                 info->bytes_may_use;
4296         spin_unlock(&info->lock);
4297
4298         num_devs = get_profile_num_devs(root, type);
4299
4300         /* num_devs device items to update and 1 chunk item to add or remove */
4301         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4302                 btrfs_calc_trans_metadata_size(root, 1);
4303
4304         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4305                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4306                         left, thresh, type);
4307                 dump_space_info(info, 0, 0);
4308         }
4309
4310         if (left < thresh) {
4311                 u64 flags;
4312
4313                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4314                 /*
4315                  * Ignore failure to create system chunk. We might end up not
4316                  * needing it, as we might not need to COW all nodes/leafs from
4317                  * the paths we visit in the chunk tree (they were already COWed
4318                  * or created in the current transaction for example).
4319                  */
4320                 ret = btrfs_alloc_chunk(trans, root, flags);
4321         }
4322
4323         if (!ret) {
4324                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4325                                           &root->fs_info->chunk_block_rsv,
4326                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4327                 if (!ret)
4328                         trans->chunk_bytes_reserved += thresh;
4329         }
4330 }
4331
4332 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4333                           struct btrfs_root *extent_root, u64 flags, int force)
4334 {
4335         struct btrfs_space_info *space_info;
4336         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4337         int wait_for_alloc = 0;
4338         int ret = 0;
4339
4340         /* Don't re-enter if we're already allocating a chunk */
4341         if (trans->allocating_chunk)
4342                 return -ENOSPC;
4343
4344         space_info = __find_space_info(extent_root->fs_info, flags);
4345         if (!space_info) {
4346                 ret = update_space_info(extent_root->fs_info, flags,
4347                                         0, 0, &space_info);
4348                 BUG_ON(ret); /* -ENOMEM */
4349         }
4350         BUG_ON(!space_info); /* Logic error */
4351
4352 again:
4353         spin_lock(&space_info->lock);
4354         if (force < space_info->force_alloc)
4355                 force = space_info->force_alloc;
4356         if (space_info->full) {
4357                 if (should_alloc_chunk(extent_root, space_info, force))
4358                         ret = -ENOSPC;
4359                 else
4360                         ret = 0;
4361                 spin_unlock(&space_info->lock);
4362                 return ret;
4363         }
4364
4365         if (!should_alloc_chunk(extent_root, space_info, force)) {
4366                 spin_unlock(&space_info->lock);
4367                 return 0;
4368         } else if (space_info->chunk_alloc) {
4369                 wait_for_alloc = 1;
4370         } else {
4371                 space_info->chunk_alloc = 1;
4372         }
4373
4374         spin_unlock(&space_info->lock);
4375
4376         mutex_lock(&fs_info->chunk_mutex);
4377
4378         /*
4379          * The chunk_mutex is held throughout the entirety of a chunk
4380          * allocation, so once we've acquired the chunk_mutex we know that the
4381          * other guy is done and we need to recheck and see if we should
4382          * allocate.
4383          */
4384         if (wait_for_alloc) {
4385                 mutex_unlock(&fs_info->chunk_mutex);
4386                 wait_for_alloc = 0;
4387                 goto again;
4388         }
4389
4390         trans->allocating_chunk = true;
4391
4392         /*
4393          * If we have mixed data/metadata chunks we want to make sure we keep
4394          * allocating mixed chunks instead of individual chunks.
4395          */
4396         if (btrfs_mixed_space_info(space_info))
4397                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4398
4399         /*
4400          * if we're doing a data chunk, go ahead and make sure that
4401          * we keep a reasonable number of metadata chunks allocated in the
4402          * FS as well.
4403          */
4404         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4405                 fs_info->data_chunk_allocations++;
4406                 if (!(fs_info->data_chunk_allocations %
4407                       fs_info->metadata_ratio))
4408                         force_metadata_allocation(fs_info);
4409         }
4410
4411         /*
4412          * Check if we have enough space in SYSTEM chunk because we may need
4413          * to update devices.
4414          */
4415         check_system_chunk(trans, extent_root, flags);
4416
4417         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4418         trans->allocating_chunk = false;
4419
4420         spin_lock(&space_info->lock);
4421         if (ret < 0 && ret != -ENOSPC)
4422                 goto out;
4423         if (ret)
4424                 space_info->full = 1;
4425         else
4426                 ret = 1;
4427
4428         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4429 out:
4430         space_info->chunk_alloc = 0;
4431         spin_unlock(&space_info->lock);
4432         mutex_unlock(&fs_info->chunk_mutex);
4433         /*
4434          * When we allocate a new chunk we reserve space in the chunk block
4435          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4436          * add new nodes/leafs to it if we end up needing to do it when
4437          * inserting the chunk item and updating device items as part of the
4438          * second phase of chunk allocation, performed by
4439          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4440          * large number of new block groups to create in our transaction
4441          * handle's new_bgs list to avoid exhausting the chunk block reserve
4442          * in extreme cases - like having a single transaction create many new
4443          * block groups when starting to write out the free space caches of all
4444          * the block groups that were made dirty during the lifetime of the
4445          * transaction.
4446          */
4447         if (trans->can_flush_pending_bgs &&
4448             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4449                 btrfs_create_pending_block_groups(trans, trans->root);
4450                 btrfs_trans_release_chunk_metadata(trans);
4451         }
4452         return ret;
4453 }
4454
4455 static int can_overcommit(struct btrfs_root *root,
4456                           struct btrfs_space_info *space_info, u64 bytes,
4457                           enum btrfs_reserve_flush_enum flush)
4458 {
4459         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4460         u64 profile = btrfs_get_alloc_profile(root, 0);
4461         u64 space_size;
4462         u64 avail;
4463         u64 used;
4464
4465         used = space_info->bytes_used + space_info->bytes_reserved +
4466                 space_info->bytes_pinned + space_info->bytes_readonly;
4467
4468         /*
4469          * We only want to allow over committing if we have lots of actual space
4470          * free, but if we don't have enough space to handle the global reserve
4471          * space then we could end up having a real enospc problem when trying
4472          * to allocate a chunk or some other such important allocation.
4473          */
4474         spin_lock(&global_rsv->lock);
4475         space_size = calc_global_rsv_need_space(global_rsv);
4476         spin_unlock(&global_rsv->lock);
4477         if (used + space_size >= space_info->total_bytes)
4478                 return 0;
4479
4480         used += space_info->bytes_may_use;
4481
4482         spin_lock(&root->fs_info->free_chunk_lock);
4483         avail = root->fs_info->free_chunk_space;
4484         spin_unlock(&root->fs_info->free_chunk_lock);
4485
4486         /*
4487          * If we have dup, raid1 or raid10 then only half of the free
4488          * space is actually useable.  For raid56, the space info used
4489          * doesn't include the parity drive, so we don't have to
4490          * change the math
4491          */
4492         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4493                        BTRFS_BLOCK_GROUP_RAID1 |
4494                        BTRFS_BLOCK_GROUP_RAID10))
4495                 avail >>= 1;
4496
4497         /*
4498          * If we aren't flushing all things, let us overcommit up to
4499          * 1/2th of the space. If we can flush, don't let us overcommit
4500          * too much, let it overcommit up to 1/8 of the space.
4501          */
4502         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4503                 avail >>= 3;
4504         else
4505                 avail >>= 1;
4506
4507         if (used + bytes < space_info->total_bytes + avail)
4508                 return 1;
4509         return 0;
4510 }
4511
4512 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4513                                          unsigned long nr_pages, int nr_items)
4514 {
4515         struct super_block *sb = root->fs_info->sb;
4516
4517         if (down_read_trylock(&sb->s_umount)) {
4518                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4519                 up_read(&sb->s_umount);
4520         } else {
4521                 /*
4522                  * We needn't worry the filesystem going from r/w to r/o though
4523                  * we don't acquire ->s_umount mutex, because the filesystem
4524                  * should guarantee the delalloc inodes list be empty after
4525                  * the filesystem is readonly(all dirty pages are written to
4526                  * the disk).
4527                  */
4528                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4529                 if (!current->journal_info)
4530                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4531         }
4532 }
4533
4534 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4535 {
4536         u64 bytes;
4537         int nr;
4538
4539         bytes = btrfs_calc_trans_metadata_size(root, 1);
4540         nr = (int)div64_u64(to_reclaim, bytes);
4541         if (!nr)
4542                 nr = 1;
4543         return nr;
4544 }
4545
4546 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4547
4548 /*
4549  * shrink metadata reservation for delalloc
4550  */
4551 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4552                             bool wait_ordered)
4553 {
4554         struct btrfs_block_rsv *block_rsv;
4555         struct btrfs_space_info *space_info;
4556         struct btrfs_trans_handle *trans;
4557         u64 delalloc_bytes;
4558         u64 max_reclaim;
4559         long time_left;
4560         unsigned long nr_pages;
4561         int loops;
4562         int items;
4563         enum btrfs_reserve_flush_enum flush;
4564
4565         /* Calc the number of the pages we need flush for space reservation */
4566         items = calc_reclaim_items_nr(root, to_reclaim);
4567         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4568
4569         trans = (struct btrfs_trans_handle *)current->journal_info;
4570         block_rsv = &root->fs_info->delalloc_block_rsv;
4571         space_info = block_rsv->space_info;
4572
4573         delalloc_bytes = percpu_counter_sum_positive(
4574                                                 &root->fs_info->delalloc_bytes);
4575         if (delalloc_bytes == 0) {
4576                 if (trans)
4577                         return;
4578                 if (wait_ordered)
4579                         btrfs_wait_ordered_roots(root->fs_info, items);
4580                 return;
4581         }
4582
4583         loops = 0;
4584         while (delalloc_bytes && loops < 3) {
4585                 max_reclaim = min(delalloc_bytes, to_reclaim);
4586                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4587                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4588                 /*
4589                  * We need to wait for the async pages to actually start before
4590                  * we do anything.
4591                  */
4592                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4593                 if (!max_reclaim)
4594                         goto skip_async;
4595
4596                 if (max_reclaim <= nr_pages)
4597                         max_reclaim = 0;
4598                 else
4599                         max_reclaim -= nr_pages;
4600
4601                 wait_event(root->fs_info->async_submit_wait,
4602                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4603                            (int)max_reclaim);
4604 skip_async:
4605                 if (!trans)
4606                         flush = BTRFS_RESERVE_FLUSH_ALL;
4607                 else
4608                         flush = BTRFS_RESERVE_NO_FLUSH;
4609                 spin_lock(&space_info->lock);
4610                 if (can_overcommit(root, space_info, orig, flush)) {
4611                         spin_unlock(&space_info->lock);
4612                         break;
4613                 }
4614                 spin_unlock(&space_info->lock);
4615
4616                 loops++;
4617                 if (wait_ordered && !trans) {
4618                         btrfs_wait_ordered_roots(root->fs_info, items);
4619                 } else {
4620                         time_left = schedule_timeout_killable(1);
4621                         if (time_left)
4622                                 break;
4623                 }
4624                 delalloc_bytes = percpu_counter_sum_positive(
4625                                                 &root->fs_info->delalloc_bytes);
4626         }
4627 }
4628
4629 /**
4630  * maybe_commit_transaction - possibly commit the transaction if its ok to
4631  * @root - the root we're allocating for
4632  * @bytes - the number of bytes we want to reserve
4633  * @force - force the commit
4634  *
4635  * This will check to make sure that committing the transaction will actually
4636  * get us somewhere and then commit the transaction if it does.  Otherwise it
4637  * will return -ENOSPC.
4638  */
4639 static int may_commit_transaction(struct btrfs_root *root,
4640                                   struct btrfs_space_info *space_info,
4641                                   u64 bytes, int force)
4642 {
4643         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4644         struct btrfs_trans_handle *trans;
4645
4646         trans = (struct btrfs_trans_handle *)current->journal_info;
4647         if (trans)
4648                 return -EAGAIN;
4649
4650         if (force)
4651                 goto commit;
4652
4653         /* See if there is enough pinned space to make this reservation */
4654         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4655                                    bytes) >= 0)
4656                 goto commit;
4657
4658         /*
4659          * See if there is some space in the delayed insertion reservation for
4660          * this reservation.
4661          */
4662         if (space_info != delayed_rsv->space_info)
4663                 return -ENOSPC;
4664
4665         spin_lock(&delayed_rsv->lock);
4666         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4667                                    bytes - delayed_rsv->size) >= 0) {
4668                 spin_unlock(&delayed_rsv->lock);
4669                 return -ENOSPC;
4670         }
4671         spin_unlock(&delayed_rsv->lock);
4672
4673 commit:
4674         trans = btrfs_join_transaction(root);
4675         if (IS_ERR(trans))
4676                 return -ENOSPC;
4677
4678         return btrfs_commit_transaction(trans, root);
4679 }
4680
4681 enum flush_state {
4682         FLUSH_DELAYED_ITEMS_NR  =       1,
4683         FLUSH_DELAYED_ITEMS     =       2,
4684         FLUSH_DELALLOC          =       3,
4685         FLUSH_DELALLOC_WAIT     =       4,
4686         ALLOC_CHUNK             =       5,
4687         COMMIT_TRANS            =       6,
4688 };
4689
4690 static int flush_space(struct btrfs_root *root,
4691                        struct btrfs_space_info *space_info, u64 num_bytes,
4692                        u64 orig_bytes, int state)
4693 {
4694         struct btrfs_trans_handle *trans;
4695         int nr;
4696         int ret = 0;
4697
4698         switch (state) {
4699         case FLUSH_DELAYED_ITEMS_NR:
4700         case FLUSH_DELAYED_ITEMS:
4701                 if (state == FLUSH_DELAYED_ITEMS_NR)
4702                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4703                 else
4704                         nr = -1;
4705
4706                 trans = btrfs_join_transaction(root);
4707                 if (IS_ERR(trans)) {
4708                         ret = PTR_ERR(trans);
4709                         break;
4710                 }
4711                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4712                 btrfs_end_transaction(trans, root);
4713                 break;
4714         case FLUSH_DELALLOC:
4715         case FLUSH_DELALLOC_WAIT:
4716                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4717                                 state == FLUSH_DELALLOC_WAIT);
4718                 break;
4719         case ALLOC_CHUNK:
4720                 trans = btrfs_join_transaction(root);
4721                 if (IS_ERR(trans)) {
4722                         ret = PTR_ERR(trans);
4723                         break;
4724                 }
4725                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4726                                      btrfs_get_alloc_profile(root, 0),
4727                                      CHUNK_ALLOC_NO_FORCE);
4728                 btrfs_end_transaction(trans, root);
4729                 if (ret == -ENOSPC)
4730                         ret = 0;
4731                 break;
4732         case COMMIT_TRANS:
4733                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4734                 break;
4735         default:
4736                 ret = -ENOSPC;
4737                 break;
4738         }
4739
4740         return ret;
4741 }
4742
4743 static inline u64
4744 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4745                                  struct btrfs_space_info *space_info)
4746 {
4747         u64 used;
4748         u64 expected;
4749         u64 to_reclaim;
4750
4751         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4752         spin_lock(&space_info->lock);
4753         if (can_overcommit(root, space_info, to_reclaim,
4754                            BTRFS_RESERVE_FLUSH_ALL)) {
4755                 to_reclaim = 0;
4756                 goto out;
4757         }
4758
4759         used = space_info->bytes_used + space_info->bytes_reserved +
4760                space_info->bytes_pinned + space_info->bytes_readonly +
4761                space_info->bytes_may_use;
4762         if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4763                 expected = div_factor_fine(space_info->total_bytes, 95);
4764         else
4765                 expected = div_factor_fine(space_info->total_bytes, 90);
4766
4767         if (used > expected)
4768                 to_reclaim = used - expected;
4769         else
4770                 to_reclaim = 0;
4771         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4772                                      space_info->bytes_reserved);
4773 out:
4774         spin_unlock(&space_info->lock);
4775
4776         return to_reclaim;
4777 }
4778
4779 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4780                                         struct btrfs_fs_info *fs_info, u64 used)
4781 {
4782         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4783
4784         /* If we're just plain full then async reclaim just slows us down. */
4785         if (space_info->bytes_used >= thresh)
4786                 return 0;
4787
4788         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4789                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4790 }
4791
4792 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4793                                        struct btrfs_fs_info *fs_info,
4794                                        int flush_state)
4795 {
4796         u64 used;
4797
4798         spin_lock(&space_info->lock);
4799         /*
4800          * We run out of space and have not got any free space via flush_space,
4801          * so don't bother doing async reclaim.
4802          */
4803         if (flush_state > COMMIT_TRANS && space_info->full) {
4804                 spin_unlock(&space_info->lock);
4805                 return 0;
4806         }
4807
4808         used = space_info->bytes_used + space_info->bytes_reserved +
4809                space_info->bytes_pinned + space_info->bytes_readonly +
4810                space_info->bytes_may_use;
4811         if (need_do_async_reclaim(space_info, fs_info, used)) {
4812                 spin_unlock(&space_info->lock);
4813                 return 1;
4814         }
4815         spin_unlock(&space_info->lock);
4816
4817         return 0;
4818 }
4819
4820 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4821 {
4822         struct btrfs_fs_info *fs_info;
4823         struct btrfs_space_info *space_info;
4824         u64 to_reclaim;
4825         int flush_state;
4826
4827         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4828         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4829
4830         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4831                                                       space_info);
4832         if (!to_reclaim)
4833                 return;
4834
4835         flush_state = FLUSH_DELAYED_ITEMS_NR;
4836         do {
4837                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4838                             to_reclaim, flush_state);
4839                 flush_state++;
4840                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4841                                                  flush_state))
4842                         return;
4843         } while (flush_state < COMMIT_TRANS);
4844 }
4845
4846 void btrfs_init_async_reclaim_work(struct work_struct *work)
4847 {
4848         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4849 }
4850
4851 /**
4852  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4853  * @root - the root we're allocating for
4854  * @block_rsv - the block_rsv we're allocating for
4855  * @orig_bytes - the number of bytes we want
4856  * @flush - whether or not we can flush to make our reservation
4857  *
4858  * This will reserve orgi_bytes number of bytes from the space info associated
4859  * with the block_rsv.  If there is not enough space it will make an attempt to
4860  * flush out space to make room.  It will do this by flushing delalloc if
4861  * possible or committing the transaction.  If flush is 0 then no attempts to
4862  * regain reservations will be made and this will fail if there is not enough
4863  * space already.
4864  */
4865 static int reserve_metadata_bytes(struct btrfs_root *root,
4866                                   struct btrfs_block_rsv *block_rsv,
4867                                   u64 orig_bytes,
4868                                   enum btrfs_reserve_flush_enum flush)
4869 {
4870         struct btrfs_space_info *space_info = block_rsv->space_info;
4871         u64 used;
4872         u64 num_bytes = orig_bytes;
4873         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4874         int ret = 0;
4875         bool flushing = false;
4876
4877 again:
4878         ret = 0;
4879         spin_lock(&space_info->lock);
4880         /*
4881          * We only want to wait if somebody other than us is flushing and we
4882          * are actually allowed to flush all things.
4883          */
4884         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4885                space_info->flush) {
4886                 spin_unlock(&space_info->lock);
4887                 /*
4888                  * If we have a trans handle we can't wait because the flusher
4889                  * may have to commit the transaction, which would mean we would
4890                  * deadlock since we are waiting for the flusher to finish, but
4891                  * hold the current transaction open.
4892                  */
4893                 if (current->journal_info)
4894                         return -EAGAIN;
4895                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4896                 /* Must have been killed, return */
4897                 if (ret)
4898                         return -EINTR;
4899
4900                 spin_lock(&space_info->lock);
4901         }
4902
4903         ret = -ENOSPC;
4904         used = space_info->bytes_used + space_info->bytes_reserved +
4905                 space_info->bytes_pinned + space_info->bytes_readonly +
4906                 space_info->bytes_may_use;
4907
4908         /*
4909          * The idea here is that we've not already over-reserved the block group
4910          * then we can go ahead and save our reservation first and then start
4911          * flushing if we need to.  Otherwise if we've already overcommitted
4912          * lets start flushing stuff first and then come back and try to make
4913          * our reservation.
4914          */
4915         if (used <= space_info->total_bytes) {
4916                 if (used + orig_bytes <= space_info->total_bytes) {
4917                         space_info->bytes_may_use += orig_bytes;
4918                         trace_btrfs_space_reservation(root->fs_info,
4919                                 "space_info", space_info->flags, orig_bytes, 1);
4920                         ret = 0;
4921                 } else {
4922                         /*
4923                          * Ok set num_bytes to orig_bytes since we aren't
4924                          * overocmmitted, this way we only try and reclaim what
4925                          * we need.
4926                          */
4927                         num_bytes = orig_bytes;
4928                 }
4929         } else {
4930                 /*
4931                  * Ok we're over committed, set num_bytes to the overcommitted
4932                  * amount plus the amount of bytes that we need for this
4933                  * reservation.
4934                  */
4935                 num_bytes = used - space_info->total_bytes +
4936                         (orig_bytes * 2);
4937         }
4938
4939         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4940                 space_info->bytes_may_use += orig_bytes;
4941                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4942                                               space_info->flags, orig_bytes,
4943                                               1);
4944                 ret = 0;
4945         }
4946
4947         /*
4948          * Couldn't make our reservation, save our place so while we're trying
4949          * to reclaim space we can actually use it instead of somebody else
4950          * stealing it from us.
4951          *
4952          * We make the other tasks wait for the flush only when we can flush
4953          * all things.
4954          */
4955         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4956                 flushing = true;
4957                 space_info->flush = 1;
4958         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4959                 used += orig_bytes;
4960                 /*
4961                  * We will do the space reservation dance during log replay,
4962                  * which means we won't have fs_info->fs_root set, so don't do
4963                  * the async reclaim as we will panic.
4964                  */
4965                 if (!root->fs_info->log_root_recovering &&
4966                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4967                     !work_busy(&root->fs_info->async_reclaim_work))
4968                         queue_work(system_unbound_wq,
4969                                    &root->fs_info->async_reclaim_work);
4970         }
4971         spin_unlock(&space_info->lock);
4972
4973         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4974                 goto out;
4975
4976         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4977                           flush_state);
4978         flush_state++;
4979
4980         /*
4981          * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4982          * would happen. So skip delalloc flush.
4983          */
4984         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4985             (flush_state == FLUSH_DELALLOC ||
4986              flush_state == FLUSH_DELALLOC_WAIT))
4987                 flush_state = ALLOC_CHUNK;
4988
4989         if (!ret)
4990                 goto again;
4991         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4992                  flush_state < COMMIT_TRANS)
4993                 goto again;
4994         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4995                  flush_state <= COMMIT_TRANS)
4996                 goto again;
4997
4998 out:
4999         if (ret == -ENOSPC &&
5000             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5001                 struct btrfs_block_rsv *global_rsv =
5002                         &root->fs_info->global_block_rsv;
5003
5004                 if (block_rsv != global_rsv &&
5005                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5006                         ret = 0;
5007         }
5008         if (ret == -ENOSPC)
5009                 trace_btrfs_space_reservation(root->fs_info,
5010                                               "space_info:enospc",
5011                                               space_info->flags, orig_bytes, 1);
5012         if (flushing) {
5013                 spin_lock(&space_info->lock);
5014                 space_info->flush = 0;
5015                 wake_up_all(&space_info->wait);
5016                 spin_unlock(&space_info->lock);
5017         }
5018         return ret;
5019 }
5020
5021 static struct btrfs_block_rsv *get_block_rsv(
5022                                         const struct btrfs_trans_handle *trans,
5023                                         const struct btrfs_root *root)
5024 {
5025         struct btrfs_block_rsv *block_rsv = NULL;
5026
5027         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5028             (root == root->fs_info->csum_root && trans->adding_csums) ||
5029              (root == root->fs_info->uuid_root))
5030                 block_rsv = trans->block_rsv;
5031
5032         if (!block_rsv)
5033                 block_rsv = root->block_rsv;
5034
5035         if (!block_rsv)
5036                 block_rsv = &root->fs_info->empty_block_rsv;
5037
5038         return block_rsv;
5039 }
5040
5041 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5042                                u64 num_bytes)
5043 {
5044         int ret = -ENOSPC;
5045         spin_lock(&block_rsv->lock);
5046         if (block_rsv->reserved >= num_bytes) {
5047                 block_rsv->reserved -= num_bytes;
5048                 if (block_rsv->reserved < block_rsv->size)
5049                         block_rsv->full = 0;
5050                 ret = 0;
5051         }
5052         spin_unlock(&block_rsv->lock);
5053         return ret;
5054 }
5055
5056 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5057                                 u64 num_bytes, int update_size)
5058 {
5059         spin_lock(&block_rsv->lock);
5060         block_rsv->reserved += num_bytes;
5061         if (update_size)
5062                 block_rsv->size += num_bytes;
5063         else if (block_rsv->reserved >= block_rsv->size)
5064                 block_rsv->full = 1;
5065         spin_unlock(&block_rsv->lock);
5066 }
5067
5068 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5069                              struct btrfs_block_rsv *dest, u64 num_bytes,
5070                              int min_factor)
5071 {
5072         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5073         u64 min_bytes;
5074
5075         if (global_rsv->space_info != dest->space_info)
5076                 return -ENOSPC;
5077
5078         spin_lock(&global_rsv->lock);
5079         min_bytes = div_factor(global_rsv->size, min_factor);
5080         if (global_rsv->reserved < min_bytes + num_bytes) {
5081                 spin_unlock(&global_rsv->lock);
5082                 return -ENOSPC;
5083         }
5084         global_rsv->reserved -= num_bytes;
5085         if (global_rsv->reserved < global_rsv->size)
5086                 global_rsv->full = 0;
5087         spin_unlock(&global_rsv->lock);
5088
5089         block_rsv_add_bytes(dest, num_bytes, 1);
5090         return 0;
5091 }
5092
5093 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5094                                     struct btrfs_block_rsv *block_rsv,
5095                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5096 {
5097         struct btrfs_space_info *space_info = block_rsv->space_info;
5098
5099         spin_lock(&block_rsv->lock);
5100         if (num_bytes == (u64)-1)
5101                 num_bytes = block_rsv->size;
5102         block_rsv->size -= num_bytes;
5103         if (block_rsv->reserved >= block_rsv->size) {
5104                 num_bytes = block_rsv->reserved - block_rsv->size;
5105                 block_rsv->reserved = block_rsv->size;
5106                 block_rsv->full = 1;
5107         } else {
5108                 num_bytes = 0;
5109         }
5110         spin_unlock(&block_rsv->lock);
5111
5112         if (num_bytes > 0) {
5113                 if (dest) {
5114                         spin_lock(&dest->lock);
5115                         if (!dest->full) {
5116                                 u64 bytes_to_add;
5117
5118                                 bytes_to_add = dest->size - dest->reserved;
5119                                 bytes_to_add = min(num_bytes, bytes_to_add);
5120                                 dest->reserved += bytes_to_add;
5121                                 if (dest->reserved >= dest->size)
5122                                         dest->full = 1;
5123                                 num_bytes -= bytes_to_add;
5124                         }
5125                         spin_unlock(&dest->lock);
5126                 }
5127                 if (num_bytes) {
5128                         spin_lock(&space_info->lock);
5129                         space_info->bytes_may_use -= num_bytes;
5130                         trace_btrfs_space_reservation(fs_info, "space_info",
5131                                         space_info->flags, num_bytes, 0);
5132                         spin_unlock(&space_info->lock);
5133                 }
5134         }
5135 }
5136
5137 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5138                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5139 {
5140         int ret;
5141
5142         ret = block_rsv_use_bytes(src, num_bytes);
5143         if (ret)
5144                 return ret;
5145
5146         block_rsv_add_bytes(dst, num_bytes, 1);
5147         return 0;
5148 }
5149
5150 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5151 {
5152         memset(rsv, 0, sizeof(*rsv));
5153         spin_lock_init(&rsv->lock);
5154         rsv->type = type;
5155 }
5156
5157 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5158                                               unsigned short type)
5159 {
5160         struct btrfs_block_rsv *block_rsv;
5161         struct btrfs_fs_info *fs_info = root->fs_info;
5162
5163         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5164         if (!block_rsv)
5165                 return NULL;
5166
5167         btrfs_init_block_rsv(block_rsv, type);
5168         block_rsv->space_info = __find_space_info(fs_info,
5169                                                   BTRFS_BLOCK_GROUP_METADATA);
5170         return block_rsv;
5171 }
5172
5173 void btrfs_free_block_rsv(struct btrfs_root *root,
5174                           struct btrfs_block_rsv *rsv)
5175 {
5176         if (!rsv)
5177                 return;
5178         btrfs_block_rsv_release(root, rsv, (u64)-1);
5179         kfree(rsv);
5180 }
5181
5182 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5183 {
5184         kfree(rsv);
5185 }
5186
5187 int btrfs_block_rsv_add(struct btrfs_root *root,
5188                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5189                         enum btrfs_reserve_flush_enum flush)
5190 {
5191         int ret;
5192
5193         if (num_bytes == 0)
5194                 return 0;
5195
5196         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5197         if (!ret) {
5198                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5199                 return 0;
5200         }
5201
5202         return ret;
5203 }
5204
5205 int btrfs_block_rsv_check(struct btrfs_root *root,
5206                           struct btrfs_block_rsv *block_rsv, int min_factor)
5207 {
5208         u64 num_bytes = 0;
5209         int ret = -ENOSPC;
5210
5211         if (!block_rsv)
5212                 return 0;
5213
5214         spin_lock(&block_rsv->lock);
5215         num_bytes = div_factor(block_rsv->size, min_factor);
5216         if (block_rsv->reserved >= num_bytes)
5217                 ret = 0;
5218         spin_unlock(&block_rsv->lock);
5219
5220         return ret;
5221 }
5222
5223 int btrfs_block_rsv_refill(struct btrfs_root *root,
5224                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5225                            enum btrfs_reserve_flush_enum flush)
5226 {
5227         u64 num_bytes = 0;
5228         int ret = -ENOSPC;
5229
5230         if (!block_rsv)
5231                 return 0;
5232
5233         spin_lock(&block_rsv->lock);
5234         num_bytes = min_reserved;
5235         if (block_rsv->reserved >= num_bytes)
5236                 ret = 0;
5237         else
5238                 num_bytes -= block_rsv->reserved;
5239         spin_unlock(&block_rsv->lock);
5240
5241         if (!ret)
5242                 return 0;
5243
5244         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5245         if (!ret) {
5246                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5247                 return 0;
5248         }
5249
5250         return ret;
5251 }
5252
5253 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5254                             struct btrfs_block_rsv *dst_rsv,
5255                             u64 num_bytes)
5256 {
5257         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5258 }
5259
5260 void btrfs_block_rsv_release(struct btrfs_root *root,
5261                              struct btrfs_block_rsv *block_rsv,
5262                              u64 num_bytes)
5263 {
5264         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5265         if (global_rsv == block_rsv ||
5266             block_rsv->space_info != global_rsv->space_info)
5267                 global_rsv = NULL;
5268         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5269                                 num_bytes);
5270 }
5271
5272 /*
5273  * helper to calculate size of global block reservation.
5274  * the desired value is sum of space used by extent tree,
5275  * checksum tree and root tree
5276  */
5277 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5278 {
5279         struct btrfs_space_info *sinfo;
5280         u64 num_bytes;
5281         u64 meta_used;
5282         u64 data_used;
5283         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5284
5285         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5286         spin_lock(&sinfo->lock);
5287         data_used = sinfo->bytes_used;
5288         spin_unlock(&sinfo->lock);
5289
5290         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5291         spin_lock(&sinfo->lock);
5292         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5293                 data_used = 0;
5294         meta_used = sinfo->bytes_used;
5295         spin_unlock(&sinfo->lock);
5296
5297         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5298                     csum_size * 2;
5299         num_bytes += div_u64(data_used + meta_used, 50);
5300
5301         if (num_bytes * 3 > meta_used)
5302                 num_bytes = div_u64(meta_used, 3);
5303
5304         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5305 }
5306
5307 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5308 {
5309         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5310         struct btrfs_space_info *sinfo = block_rsv->space_info;
5311         u64 num_bytes;
5312
5313         num_bytes = calc_global_metadata_size(fs_info);
5314
5315         spin_lock(&sinfo->lock);
5316         spin_lock(&block_rsv->lock);
5317
5318         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5319
5320         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5321                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5322                     sinfo->bytes_may_use;
5323
5324         if (sinfo->total_bytes > num_bytes) {
5325                 num_bytes = sinfo->total_bytes - num_bytes;
5326                 block_rsv->reserved += num_bytes;
5327                 sinfo->bytes_may_use += num_bytes;
5328                 trace_btrfs_space_reservation(fs_info, "space_info",
5329                                       sinfo->flags, num_bytes, 1);
5330         }
5331
5332         if (block_rsv->reserved >= block_rsv->size) {
5333                 num_bytes = block_rsv->reserved - block_rsv->size;
5334                 sinfo->bytes_may_use -= num_bytes;
5335                 trace_btrfs_space_reservation(fs_info, "space_info",
5336                                       sinfo->flags, num_bytes, 0);
5337                 block_rsv->reserved = block_rsv->size;
5338                 block_rsv->full = 1;
5339         }
5340
5341         spin_unlock(&block_rsv->lock);
5342         spin_unlock(&sinfo->lock);
5343 }
5344
5345 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5346 {
5347         struct btrfs_space_info *space_info;
5348
5349         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5350         fs_info->chunk_block_rsv.space_info = space_info;
5351
5352         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5353         fs_info->global_block_rsv.space_info = space_info;
5354         fs_info->delalloc_block_rsv.space_info = space_info;
5355         fs_info->trans_block_rsv.space_info = space_info;
5356         fs_info->empty_block_rsv.space_info = space_info;
5357         fs_info->delayed_block_rsv.space_info = space_info;
5358
5359         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5360         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5361         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5362         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5363         if (fs_info->quota_root)
5364                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5365         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5366
5367         update_global_block_rsv(fs_info);
5368 }
5369
5370 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5371 {
5372         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5373                                 (u64)-1);
5374         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5375         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5376         WARN_ON(fs_info->trans_block_rsv.size > 0);
5377         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5378         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5379         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5380         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5381         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5382 }
5383
5384 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5385                                   struct btrfs_root *root)
5386 {
5387         if (!trans->block_rsv)
5388                 return;
5389
5390         if (!trans->bytes_reserved)
5391                 return;
5392
5393         trace_btrfs_space_reservation(root->fs_info, "transaction",
5394                                       trans->transid, trans->bytes_reserved, 0);
5395         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5396         trans->bytes_reserved = 0;
5397 }
5398
5399 /*
5400  * To be called after all the new block groups attached to the transaction
5401  * handle have been created (btrfs_create_pending_block_groups()).
5402  */
5403 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5404 {
5405         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5406
5407         if (!trans->chunk_bytes_reserved)
5408                 return;
5409
5410         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5411
5412         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5413                                 trans->chunk_bytes_reserved);
5414         trans->chunk_bytes_reserved = 0;
5415 }
5416
5417 /* Can only return 0 or -ENOSPC */
5418 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5419                                   struct inode *inode)
5420 {
5421         struct btrfs_root *root = BTRFS_I(inode)->root;
5422         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5423         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5424
5425         /*
5426          * We need to hold space in order to delete our orphan item once we've
5427          * added it, so this takes the reservation so we can release it later
5428          * when we are truly done with the orphan item.
5429          */
5430         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5431         trace_btrfs_space_reservation(root->fs_info, "orphan",
5432                                       btrfs_ino(inode), num_bytes, 1);
5433         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5434 }
5435
5436 void btrfs_orphan_release_metadata(struct inode *inode)
5437 {
5438         struct btrfs_root *root = BTRFS_I(inode)->root;
5439         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5440         trace_btrfs_space_reservation(root->fs_info, "orphan",
5441                                       btrfs_ino(inode), num_bytes, 0);
5442         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5443 }
5444
5445 /*
5446  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5447  * root: the root of the parent directory
5448  * rsv: block reservation
5449  * items: the number of items that we need do reservation
5450  * qgroup_reserved: used to return the reserved size in qgroup
5451  *
5452  * This function is used to reserve the space for snapshot/subvolume
5453  * creation and deletion. Those operations are different with the
5454  * common file/directory operations, they change two fs/file trees
5455  * and root tree, the number of items that the qgroup reserves is
5456  * different with the free space reservation. So we can not use
5457  * the space reseravtion mechanism in start_transaction().
5458  */
5459 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5460                                      struct btrfs_block_rsv *rsv,
5461                                      int items,
5462                                      u64 *qgroup_reserved,
5463                                      bool use_global_rsv)
5464 {
5465         u64 num_bytes;
5466         int ret;
5467         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5468
5469         if (root->fs_info->quota_enabled) {
5470                 /* One for parent inode, two for dir entries */
5471                 num_bytes = 3 * root->nodesize;
5472                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5473                 if (ret)
5474                         return ret;
5475         } else {
5476                 num_bytes = 0;
5477         }
5478
5479         *qgroup_reserved = num_bytes;
5480
5481         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5482         rsv->space_info = __find_space_info(root->fs_info,
5483                                             BTRFS_BLOCK_GROUP_METADATA);
5484         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5485                                   BTRFS_RESERVE_FLUSH_ALL);
5486
5487         if (ret == -ENOSPC && use_global_rsv)
5488                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5489
5490         if (ret && *qgroup_reserved)
5491                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5492
5493         return ret;
5494 }
5495
5496 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5497                                       struct btrfs_block_rsv *rsv,
5498                                       u64 qgroup_reserved)
5499 {
5500         btrfs_block_rsv_release(root, rsv, (u64)-1);
5501 }
5502
5503 /**
5504  * drop_outstanding_extent - drop an outstanding extent
5505  * @inode: the inode we're dropping the extent for
5506  * @num_bytes: the number of bytes we're relaseing.
5507  *
5508  * This is called when we are freeing up an outstanding extent, either called
5509  * after an error or after an extent is written.  This will return the number of
5510  * reserved extents that need to be freed.  This must be called with
5511  * BTRFS_I(inode)->lock held.
5512  */
5513 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5514 {
5515         unsigned drop_inode_space = 0;
5516         unsigned dropped_extents = 0;
5517         unsigned num_extents = 0;
5518
5519         num_extents = (unsigned)div64_u64(num_bytes +
5520                                           BTRFS_MAX_EXTENT_SIZE - 1,
5521                                           BTRFS_MAX_EXTENT_SIZE);
5522         ASSERT(num_extents);
5523         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5524         BTRFS_I(inode)->outstanding_extents -= num_extents;
5525
5526         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5527             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5528                                &BTRFS_I(inode)->runtime_flags))
5529                 drop_inode_space = 1;
5530
5531         /*
5532          * If we have more or the same amount of outsanding extents than we have
5533          * reserved then we need to leave the reserved extents count alone.
5534          */
5535         if (BTRFS_I(inode)->outstanding_extents >=
5536             BTRFS_I(inode)->reserved_extents)
5537                 return drop_inode_space;
5538
5539         dropped_extents = BTRFS_I(inode)->reserved_extents -
5540                 BTRFS_I(inode)->outstanding_extents;
5541         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5542         return dropped_extents + drop_inode_space;
5543 }
5544
5545 /**
5546  * calc_csum_metadata_size - return the amount of metada space that must be
5547  *      reserved/free'd for the given bytes.
5548  * @inode: the inode we're manipulating
5549  * @num_bytes: the number of bytes in question
5550  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5551  *
5552  * This adjusts the number of csum_bytes in the inode and then returns the
5553  * correct amount of metadata that must either be reserved or freed.  We
5554  * calculate how many checksums we can fit into one leaf and then divide the
5555  * number of bytes that will need to be checksumed by this value to figure out
5556  * how many checksums will be required.  If we are adding bytes then the number
5557  * may go up and we will return the number of additional bytes that must be
5558  * reserved.  If it is going down we will return the number of bytes that must
5559  * be freed.
5560  *
5561  * This must be called with BTRFS_I(inode)->lock held.
5562  */
5563 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5564                                    int reserve)
5565 {
5566         struct btrfs_root *root = BTRFS_I(inode)->root;
5567         u64 old_csums, num_csums;
5568
5569         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5570             BTRFS_I(inode)->csum_bytes == 0)
5571                 return 0;
5572
5573         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5574         if (reserve)
5575                 BTRFS_I(inode)->csum_bytes += num_bytes;
5576         else
5577                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5578         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5579
5580         /* No change, no need to reserve more */
5581         if (old_csums == num_csums)
5582                 return 0;
5583
5584         if (reserve)
5585                 return btrfs_calc_trans_metadata_size(root,
5586                                                       num_csums - old_csums);
5587
5588         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5589 }
5590
5591 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5592 {
5593         struct btrfs_root *root = BTRFS_I(inode)->root;
5594         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5595         u64 to_reserve = 0;
5596         u64 csum_bytes;
5597         unsigned nr_extents = 0;
5598         int extra_reserve = 0;
5599         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5600         int ret = 0;
5601         bool delalloc_lock = true;
5602         u64 to_free = 0;
5603         unsigned dropped;
5604
5605         /* If we are a free space inode we need to not flush since we will be in
5606          * the middle of a transaction commit.  We also don't need the delalloc
5607          * mutex since we won't race with anybody.  We need this mostly to make
5608          * lockdep shut its filthy mouth.
5609          */
5610         if (btrfs_is_free_space_inode(inode)) {
5611                 flush = BTRFS_RESERVE_NO_FLUSH;
5612                 delalloc_lock = false;
5613         }
5614
5615         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5616             btrfs_transaction_in_commit(root->fs_info))
5617                 schedule_timeout(1);
5618
5619         if (delalloc_lock)
5620                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5621
5622         num_bytes = ALIGN(num_bytes, root->sectorsize);
5623
5624         spin_lock(&BTRFS_I(inode)->lock);
5625         nr_extents = (unsigned)div64_u64(num_bytes +
5626                                          BTRFS_MAX_EXTENT_SIZE - 1,
5627                                          BTRFS_MAX_EXTENT_SIZE);
5628         BTRFS_I(inode)->outstanding_extents += nr_extents;
5629         nr_extents = 0;
5630
5631         if (BTRFS_I(inode)->outstanding_extents >
5632             BTRFS_I(inode)->reserved_extents)
5633                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5634                         BTRFS_I(inode)->reserved_extents;
5635
5636         /*
5637          * Add an item to reserve for updating the inode when we complete the
5638          * delalloc io.
5639          */
5640         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5641                       &BTRFS_I(inode)->runtime_flags)) {
5642                 nr_extents++;
5643                 extra_reserve = 1;
5644         }
5645
5646         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5647         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5648         csum_bytes = BTRFS_I(inode)->csum_bytes;
5649         spin_unlock(&BTRFS_I(inode)->lock);
5650
5651         if (root->fs_info->quota_enabled) {
5652                 ret = btrfs_qgroup_reserve_meta(root,
5653                                 nr_extents * root->nodesize);
5654                 if (ret)
5655                         goto out_fail;
5656         }
5657
5658         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5659         if (unlikely(ret)) {
5660                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5661                 goto out_fail;
5662         }
5663
5664         spin_lock(&BTRFS_I(inode)->lock);
5665         if (extra_reserve) {
5666                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5667                         &BTRFS_I(inode)->runtime_flags);
5668                 nr_extents--;
5669         }
5670         BTRFS_I(inode)->reserved_extents += nr_extents;
5671         spin_unlock(&BTRFS_I(inode)->lock);
5672
5673         if (delalloc_lock)
5674                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5675
5676         if (to_reserve)
5677                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5678                                               btrfs_ino(inode), to_reserve, 1);
5679         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5680
5681         return 0;
5682
5683 out_fail:
5684         spin_lock(&BTRFS_I(inode)->lock);
5685         dropped = drop_outstanding_extent(inode, num_bytes);
5686         /*
5687          * If the inodes csum_bytes is the same as the original
5688          * csum_bytes then we know we haven't raced with any free()ers
5689          * so we can just reduce our inodes csum bytes and carry on.
5690          */
5691         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5692                 calc_csum_metadata_size(inode, num_bytes, 0);
5693         } else {
5694                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5695                 u64 bytes;
5696
5697                 /*
5698                  * This is tricky, but first we need to figure out how much we
5699                  * free'd from any free-ers that occured during this
5700                  * reservation, so we reset ->csum_bytes to the csum_bytes
5701                  * before we dropped our lock, and then call the free for the
5702                  * number of bytes that were freed while we were trying our
5703                  * reservation.
5704                  */
5705                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5706                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5707                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5708
5709
5710                 /*
5711                  * Now we need to see how much we would have freed had we not
5712                  * been making this reservation and our ->csum_bytes were not
5713                  * artificially inflated.
5714                  */
5715                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5716                 bytes = csum_bytes - orig_csum_bytes;
5717                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5718
5719                 /*
5720                  * Now reset ->csum_bytes to what it should be.  If bytes is
5721                  * more than to_free then we would have free'd more space had we
5722                  * not had an artificially high ->csum_bytes, so we need to free
5723                  * the remainder.  If bytes is the same or less then we don't
5724                  * need to do anything, the other free-ers did the correct
5725                  * thing.
5726                  */
5727                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5728                 if (bytes > to_free)
5729                         to_free = bytes - to_free;
5730                 else
5731                         to_free = 0;
5732         }
5733         spin_unlock(&BTRFS_I(inode)->lock);
5734         if (dropped)
5735                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5736
5737         if (to_free) {
5738                 btrfs_block_rsv_release(root, block_rsv, to_free);
5739                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5740                                               btrfs_ino(inode), to_free, 0);
5741         }
5742         if (delalloc_lock)
5743                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5744         return ret;
5745 }
5746
5747 /**
5748  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5749  * @inode: the inode to release the reservation for
5750  * @num_bytes: the number of bytes we're releasing
5751  *
5752  * This will release the metadata reservation for an inode.  This can be called
5753  * once we complete IO for a given set of bytes to release their metadata
5754  * reservations.
5755  */
5756 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5757 {
5758         struct btrfs_root *root = BTRFS_I(inode)->root;
5759         u64 to_free = 0;
5760         unsigned dropped;
5761
5762         num_bytes = ALIGN(num_bytes, root->sectorsize);
5763         spin_lock(&BTRFS_I(inode)->lock);
5764         dropped = drop_outstanding_extent(inode, num_bytes);
5765
5766         if (num_bytes)
5767                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5768         spin_unlock(&BTRFS_I(inode)->lock);
5769         if (dropped > 0)
5770                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5771
5772         if (btrfs_test_is_dummy_root(root))
5773                 return;
5774
5775         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5776                                       btrfs_ino(inode), to_free, 0);
5777
5778         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5779                                 to_free);
5780 }
5781
5782 /**
5783  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5784  * delalloc
5785  * @inode: inode we're writing to
5786  * @start: start range we are writing to
5787  * @len: how long the range we are writing to
5788  *
5789  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5790  *
5791  * This will do the following things
5792  *
5793  * o reserve space in data space info for num bytes
5794  *   and reserve precious corresponding qgroup space
5795  *   (Done in check_data_free_space)
5796  *
5797  * o reserve space for metadata space, based on the number of outstanding
5798  *   extents and how much csums will be needed
5799  *   also reserve metadata space in a per root over-reserve method.
5800  * o add to the inodes->delalloc_bytes
5801  * o add it to the fs_info's delalloc inodes list.
5802  *   (Above 3 all done in delalloc_reserve_metadata)
5803  *
5804  * Return 0 for success
5805  * Return <0 for error(-ENOSPC or -EQUOT)
5806  */
5807 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5808 {
5809         int ret;
5810
5811         ret = btrfs_check_data_free_space(inode, start, len);
5812         if (ret < 0)
5813                 return ret;
5814         ret = btrfs_delalloc_reserve_metadata(inode, len);
5815         if (ret < 0)
5816                 btrfs_free_reserved_data_space(inode, start, len);
5817         return ret;
5818 }
5819
5820 /**
5821  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5822  * @inode: inode we're releasing space for
5823  * @start: start position of the space already reserved
5824  * @len: the len of the space already reserved
5825  *
5826  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5827  * called in the case that we don't need the metadata AND data reservations
5828  * anymore.  So if there is an error or we insert an inline extent.
5829  *
5830  * This function will release the metadata space that was not used and will
5831  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5832  * list if there are no delalloc bytes left.
5833  * Also it will handle the qgroup reserved space.
5834  */
5835 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5836 {
5837         btrfs_delalloc_release_metadata(inode, len);
5838         btrfs_free_reserved_data_space(inode, start, len);
5839 }
5840
5841 static int update_block_group(struct btrfs_trans_handle *trans,
5842                               struct btrfs_root *root, u64 bytenr,
5843                               u64 num_bytes, int alloc)
5844 {
5845         struct btrfs_block_group_cache *cache = NULL;
5846         struct btrfs_fs_info *info = root->fs_info;
5847         u64 total = num_bytes;
5848         u64 old_val;
5849         u64 byte_in_group;
5850         int factor;
5851
5852         /* block accounting for super block */
5853         spin_lock(&info->delalloc_root_lock);
5854         old_val = btrfs_super_bytes_used(info->super_copy);
5855         if (alloc)
5856                 old_val += num_bytes;
5857         else
5858                 old_val -= num_bytes;
5859         btrfs_set_super_bytes_used(info->super_copy, old_val);
5860         spin_unlock(&info->delalloc_root_lock);
5861
5862         while (total) {
5863                 cache = btrfs_lookup_block_group(info, bytenr);
5864                 if (!cache)
5865                         return -ENOENT;
5866                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5867                                     BTRFS_BLOCK_GROUP_RAID1 |
5868                                     BTRFS_BLOCK_GROUP_RAID10))
5869                         factor = 2;
5870                 else
5871                         factor = 1;
5872                 /*
5873                  * If this block group has free space cache written out, we
5874                  * need to make sure to load it if we are removing space.  This
5875                  * is because we need the unpinning stage to actually add the
5876                  * space back to the block group, otherwise we will leak space.
5877                  */
5878                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5879                         cache_block_group(cache, 1);
5880
5881                 byte_in_group = bytenr - cache->key.objectid;
5882                 WARN_ON(byte_in_group > cache->key.offset);
5883
5884                 spin_lock(&cache->space_info->lock);
5885                 spin_lock(&cache->lock);
5886
5887                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5888                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5889                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5890
5891                 old_val = btrfs_block_group_used(&cache->item);
5892                 num_bytes = min(total, cache->key.offset - byte_in_group);
5893                 if (alloc) {
5894                         old_val += num_bytes;
5895                         btrfs_set_block_group_used(&cache->item, old_val);
5896                         cache->reserved -= num_bytes;
5897                         cache->space_info->bytes_reserved -= num_bytes;
5898                         cache->space_info->bytes_used += num_bytes;
5899                         cache->space_info->disk_used += num_bytes * factor;
5900                         spin_unlock(&cache->lock);
5901                         spin_unlock(&cache->space_info->lock);
5902                 } else {
5903                         old_val -= num_bytes;
5904                         btrfs_set_block_group_used(&cache->item, old_val);
5905                         cache->pinned += num_bytes;
5906                         cache->space_info->bytes_pinned += num_bytes;
5907                         cache->space_info->bytes_used -= num_bytes;
5908                         cache->space_info->disk_used -= num_bytes * factor;
5909                         spin_unlock(&cache->lock);
5910                         spin_unlock(&cache->space_info->lock);
5911
5912                         set_extent_dirty(info->pinned_extents,
5913                                          bytenr, bytenr + num_bytes - 1,
5914                                          GFP_NOFS | __GFP_NOFAIL);
5915                 }
5916
5917                 spin_lock(&trans->transaction->dirty_bgs_lock);
5918                 if (list_empty(&cache->dirty_list)) {
5919                         list_add_tail(&cache->dirty_list,
5920                                       &trans->transaction->dirty_bgs);
5921                                 trans->transaction->num_dirty_bgs++;
5922                         btrfs_get_block_group(cache);
5923                 }
5924                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5925
5926                 /*
5927                  * No longer have used bytes in this block group, queue it for
5928                  * deletion. We do this after adding the block group to the
5929                  * dirty list to avoid races between cleaner kthread and space
5930                  * cache writeout.
5931                  */
5932                 if (!alloc && old_val == 0) {
5933                         spin_lock(&info->unused_bgs_lock);
5934                         if (list_empty(&cache->bg_list)) {
5935                                 btrfs_get_block_group(cache);
5936                                 list_add_tail(&cache->bg_list,
5937                                               &info->unused_bgs);
5938                         }
5939                         spin_unlock(&info->unused_bgs_lock);
5940                 }
5941
5942                 btrfs_put_block_group(cache);
5943                 total -= num_bytes;
5944                 bytenr += num_bytes;
5945         }
5946         return 0;
5947 }
5948
5949 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5950 {
5951         struct btrfs_block_group_cache *cache;
5952         u64 bytenr;
5953
5954         spin_lock(&root->fs_info->block_group_cache_lock);
5955         bytenr = root->fs_info->first_logical_byte;
5956         spin_unlock(&root->fs_info->block_group_cache_lock);
5957
5958         if (bytenr < (u64)-1)
5959                 return bytenr;
5960
5961         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5962         if (!cache)
5963                 return 0;
5964
5965         bytenr = cache->key.objectid;
5966         btrfs_put_block_group(cache);
5967
5968         return bytenr;
5969 }
5970
5971 static int pin_down_extent(struct btrfs_root *root,
5972                            struct btrfs_block_group_cache *cache,
5973                            u64 bytenr, u64 num_bytes, int reserved)
5974 {
5975         spin_lock(&cache->space_info->lock);
5976         spin_lock(&cache->lock);
5977         cache->pinned += num_bytes;
5978         cache->space_info->bytes_pinned += num_bytes;
5979         if (reserved) {
5980                 cache->reserved -= num_bytes;
5981                 cache->space_info->bytes_reserved -= num_bytes;
5982         }
5983         spin_unlock(&cache->lock);
5984         spin_unlock(&cache->space_info->lock);
5985
5986         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5987                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5988         if (reserved)
5989                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5990         return 0;
5991 }
5992
5993 /*
5994  * this function must be called within transaction
5995  */
5996 int btrfs_pin_extent(struct btrfs_root *root,
5997                      u64 bytenr, u64 num_bytes, int reserved)
5998 {
5999         struct btrfs_block_group_cache *cache;
6000
6001         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6002         BUG_ON(!cache); /* Logic error */
6003
6004         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6005
6006         btrfs_put_block_group(cache);
6007         return 0;
6008 }
6009
6010 /*
6011  * this function must be called within transaction
6012  */
6013 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6014                                     u64 bytenr, u64 num_bytes)
6015 {
6016         struct btrfs_block_group_cache *cache;
6017         int ret;
6018
6019         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6020         if (!cache)
6021                 return -EINVAL;
6022
6023         /*
6024          * pull in the free space cache (if any) so that our pin
6025          * removes the free space from the cache.  We have load_only set
6026          * to one because the slow code to read in the free extents does check
6027          * the pinned extents.
6028          */
6029         cache_block_group(cache, 1);
6030
6031         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6032
6033         /* remove us from the free space cache (if we're there at all) */
6034         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6035         btrfs_put_block_group(cache);
6036         return ret;
6037 }
6038
6039 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6040 {
6041         int ret;
6042         struct btrfs_block_group_cache *block_group;
6043         struct btrfs_caching_control *caching_ctl;
6044
6045         block_group = btrfs_lookup_block_group(root->fs_info, start);
6046         if (!block_group)
6047                 return -EINVAL;
6048
6049         cache_block_group(block_group, 0);
6050         caching_ctl = get_caching_control(block_group);
6051
6052         if (!caching_ctl) {
6053                 /* Logic error */
6054                 BUG_ON(!block_group_cache_done(block_group));
6055                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6056         } else {
6057                 mutex_lock(&caching_ctl->mutex);
6058
6059                 if (start >= caching_ctl->progress) {
6060                         ret = add_excluded_extent(root, start, num_bytes);
6061                 } else if (start + num_bytes <= caching_ctl->progress) {
6062                         ret = btrfs_remove_free_space(block_group,
6063                                                       start, num_bytes);
6064                 } else {
6065                         num_bytes = caching_ctl->progress - start;
6066                         ret = btrfs_remove_free_space(block_group,
6067                                                       start, num_bytes);
6068                         if (ret)
6069                                 goto out_lock;
6070
6071                         num_bytes = (start + num_bytes) -
6072                                 caching_ctl->progress;
6073                         start = caching_ctl->progress;
6074                         ret = add_excluded_extent(root, start, num_bytes);
6075                 }
6076 out_lock:
6077                 mutex_unlock(&caching_ctl->mutex);
6078                 put_caching_control(caching_ctl);
6079         }
6080         btrfs_put_block_group(block_group);
6081         return ret;
6082 }
6083
6084 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6085                                  struct extent_buffer *eb)
6086 {
6087         struct btrfs_file_extent_item *item;
6088         struct btrfs_key key;
6089         int found_type;
6090         int i;
6091
6092         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6093                 return 0;
6094
6095         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6096                 btrfs_item_key_to_cpu(eb, &key, i);
6097                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6098                         continue;
6099                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6100                 found_type = btrfs_file_extent_type(eb, item);
6101                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6102                         continue;
6103                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6104                         continue;
6105                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6106                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6107                 __exclude_logged_extent(log, key.objectid, key.offset);
6108         }
6109
6110         return 0;
6111 }
6112
6113 /**
6114  * btrfs_update_reserved_bytes - update the block_group and space info counters
6115  * @cache:      The cache we are manipulating
6116  * @num_bytes:  The number of bytes in question
6117  * @reserve:    One of the reservation enums
6118  * @delalloc:   The blocks are allocated for the delalloc write
6119  *
6120  * This is called by the allocator when it reserves space, or by somebody who is
6121  * freeing space that was never actually used on disk.  For example if you
6122  * reserve some space for a new leaf in transaction A and before transaction A
6123  * commits you free that leaf, you call this with reserve set to 0 in order to
6124  * clear the reservation.
6125  *
6126  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6127  * ENOSPC accounting.  For data we handle the reservation through clearing the
6128  * delalloc bits in the io_tree.  We have to do this since we could end up
6129  * allocating less disk space for the amount of data we have reserved in the
6130  * case of compression.
6131  *
6132  * If this is a reservation and the block group has become read only we cannot
6133  * make the reservation and return -EAGAIN, otherwise this function always
6134  * succeeds.
6135  */
6136 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6137                                        u64 num_bytes, int reserve, int delalloc)
6138 {
6139         struct btrfs_space_info *space_info = cache->space_info;
6140         int ret = 0;
6141
6142         spin_lock(&space_info->lock);
6143         spin_lock(&cache->lock);
6144         if (reserve != RESERVE_FREE) {
6145                 if (cache->ro) {
6146                         ret = -EAGAIN;
6147                 } else {
6148                         cache->reserved += num_bytes;
6149                         space_info->bytes_reserved += num_bytes;
6150                         if (reserve == RESERVE_ALLOC) {
6151                                 trace_btrfs_space_reservation(cache->fs_info,
6152                                                 "space_info", space_info->flags,
6153                                                 num_bytes, 0);
6154                                 space_info->bytes_may_use -= num_bytes;
6155                         }
6156
6157                         if (delalloc)
6158                                 cache->delalloc_bytes += num_bytes;
6159                 }
6160         } else {
6161                 if (cache->ro)
6162                         space_info->bytes_readonly += num_bytes;
6163                 cache->reserved -= num_bytes;
6164                 space_info->bytes_reserved -= num_bytes;
6165
6166                 if (delalloc)
6167                         cache->delalloc_bytes -= num_bytes;
6168         }
6169         spin_unlock(&cache->lock);
6170         spin_unlock(&space_info->lock);
6171         return ret;
6172 }
6173
6174 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6175                                 struct btrfs_root *root)
6176 {
6177         struct btrfs_fs_info *fs_info = root->fs_info;
6178         struct btrfs_caching_control *next;
6179         struct btrfs_caching_control *caching_ctl;
6180         struct btrfs_block_group_cache *cache;
6181
6182         down_write(&fs_info->commit_root_sem);
6183
6184         list_for_each_entry_safe(caching_ctl, next,
6185                                  &fs_info->caching_block_groups, list) {
6186                 cache = caching_ctl->block_group;
6187                 if (block_group_cache_done(cache)) {
6188                         cache->last_byte_to_unpin = (u64)-1;
6189                         list_del_init(&caching_ctl->list);
6190                         put_caching_control(caching_ctl);
6191                 } else {
6192                         cache->last_byte_to_unpin = caching_ctl->progress;
6193                 }
6194         }
6195
6196         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6197                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6198         else
6199                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6200
6201         up_write(&fs_info->commit_root_sem);
6202
6203         update_global_block_rsv(fs_info);
6204 }
6205
6206 /*
6207  * Returns the free cluster for the given space info and sets empty_cluster to
6208  * what it should be based on the mount options.
6209  */
6210 static struct btrfs_free_cluster *
6211 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6212                    u64 *empty_cluster)
6213 {
6214         struct btrfs_free_cluster *ret = NULL;
6215         bool ssd = btrfs_test_opt(root, SSD);
6216
6217         *empty_cluster = 0;
6218         if (btrfs_mixed_space_info(space_info))
6219                 return ret;
6220
6221         if (ssd)
6222                 *empty_cluster = SZ_2M;
6223         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6224                 ret = &root->fs_info->meta_alloc_cluster;
6225                 if (!ssd)
6226                         *empty_cluster = SZ_64K;
6227         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6228                 ret = &root->fs_info->data_alloc_cluster;
6229         }
6230
6231         return ret;
6232 }
6233
6234 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6235                               const bool return_free_space)
6236 {
6237         struct btrfs_fs_info *fs_info = root->fs_info;
6238         struct btrfs_block_group_cache *cache = NULL;
6239         struct btrfs_space_info *space_info;
6240         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6241         struct btrfs_free_cluster *cluster = NULL;
6242         u64 len;
6243         u64 total_unpinned = 0;
6244         u64 empty_cluster = 0;
6245         bool readonly;
6246
6247         while (start <= end) {
6248                 readonly = false;
6249                 if (!cache ||
6250                     start >= cache->key.objectid + cache->key.offset) {
6251                         if (cache)
6252                                 btrfs_put_block_group(cache);
6253                         total_unpinned = 0;
6254                         cache = btrfs_lookup_block_group(fs_info, start);
6255                         BUG_ON(!cache); /* Logic error */
6256
6257                         cluster = fetch_cluster_info(root,
6258                                                      cache->space_info,
6259                                                      &empty_cluster);
6260                         empty_cluster <<= 1;
6261                 }
6262
6263                 len = cache->key.objectid + cache->key.offset - start;
6264                 len = min(len, end + 1 - start);
6265
6266                 if (start < cache->last_byte_to_unpin) {
6267                         len = min(len, cache->last_byte_to_unpin - start);
6268                         if (return_free_space)
6269                                 btrfs_add_free_space(cache, start, len);
6270                 }
6271
6272                 start += len;
6273                 total_unpinned += len;
6274                 space_info = cache->space_info;
6275
6276                 /*
6277                  * If this space cluster has been marked as fragmented and we've
6278                  * unpinned enough in this block group to potentially allow a
6279                  * cluster to be created inside of it go ahead and clear the
6280                  * fragmented check.
6281                  */
6282                 if (cluster && cluster->fragmented &&
6283                     total_unpinned > empty_cluster) {
6284                         spin_lock(&cluster->lock);
6285                         cluster->fragmented = 0;
6286                         spin_unlock(&cluster->lock);
6287                 }
6288
6289                 spin_lock(&space_info->lock);
6290                 spin_lock(&cache->lock);
6291                 cache->pinned -= len;
6292                 space_info->bytes_pinned -= len;
6293                 space_info->max_extent_size = 0;
6294                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6295                 if (cache->ro) {
6296                         space_info->bytes_readonly += len;
6297                         readonly = true;
6298                 }
6299                 spin_unlock(&cache->lock);
6300                 if (!readonly && global_rsv->space_info == space_info) {
6301                         spin_lock(&global_rsv->lock);
6302                         if (!global_rsv->full) {
6303                                 len = min(len, global_rsv->size -
6304                                           global_rsv->reserved);
6305                                 global_rsv->reserved += len;
6306                                 space_info->bytes_may_use += len;
6307                                 if (global_rsv->reserved >= global_rsv->size)
6308                                         global_rsv->full = 1;
6309                         }
6310                         spin_unlock(&global_rsv->lock);
6311                 }
6312                 spin_unlock(&space_info->lock);
6313         }
6314
6315         if (cache)
6316                 btrfs_put_block_group(cache);
6317         return 0;
6318 }
6319
6320 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6321                                struct btrfs_root *root)
6322 {
6323         struct btrfs_fs_info *fs_info = root->fs_info;
6324         struct btrfs_block_group_cache *block_group, *tmp;
6325         struct list_head *deleted_bgs;
6326         struct extent_io_tree *unpin;
6327         u64 start;
6328         u64 end;
6329         int ret;
6330
6331         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6332                 unpin = &fs_info->freed_extents[1];
6333         else
6334                 unpin = &fs_info->freed_extents[0];
6335
6336         while (!trans->aborted) {
6337                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6338                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6339                                             EXTENT_DIRTY, NULL);
6340                 if (ret) {
6341                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6342                         break;
6343                 }
6344
6345                 if (btrfs_test_opt(root, DISCARD))
6346                         ret = btrfs_discard_extent(root, start,
6347                                                    end + 1 - start, NULL);
6348
6349                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6350                 unpin_extent_range(root, start, end, true);
6351                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6352                 cond_resched();
6353         }
6354
6355         /*
6356          * Transaction is finished.  We don't need the lock anymore.  We
6357          * do need to clean up the block groups in case of a transaction
6358          * abort.
6359          */
6360         deleted_bgs = &trans->transaction->deleted_bgs;
6361         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6362                 u64 trimmed = 0;
6363
6364                 ret = -EROFS;
6365                 if (!trans->aborted)
6366                         ret = btrfs_discard_extent(root,
6367                                                    block_group->key.objectid,
6368                                                    block_group->key.offset,
6369                                                    &trimmed);
6370
6371                 list_del_init(&block_group->bg_list);
6372                 btrfs_put_block_group_trimming(block_group);
6373                 btrfs_put_block_group(block_group);
6374
6375                 if (ret) {
6376                         const char *errstr = btrfs_decode_error(ret);
6377                         btrfs_warn(fs_info,
6378                                    "Discard failed while removing blockgroup: errno=%d %s\n",
6379                                    ret, errstr);
6380                 }
6381         }
6382
6383         return 0;
6384 }
6385
6386 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6387                              u64 owner, u64 root_objectid)
6388 {
6389         struct btrfs_space_info *space_info;
6390         u64 flags;
6391
6392         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6393                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6394                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6395                 else
6396                         flags = BTRFS_BLOCK_GROUP_METADATA;
6397         } else {
6398                 flags = BTRFS_BLOCK_GROUP_DATA;
6399         }
6400
6401         space_info = __find_space_info(fs_info, flags);
6402         BUG_ON(!space_info); /* Logic bug */
6403         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6404 }
6405
6406
6407 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6408                                 struct btrfs_root *root,
6409                                 struct btrfs_delayed_ref_node *node, u64 parent,
6410                                 u64 root_objectid, u64 owner_objectid,
6411                                 u64 owner_offset, int refs_to_drop,
6412                                 struct btrfs_delayed_extent_op *extent_op)
6413 {
6414         struct btrfs_key key;
6415         struct btrfs_path *path;
6416         struct btrfs_fs_info *info = root->fs_info;
6417         struct btrfs_root *extent_root = info->extent_root;
6418         struct extent_buffer *leaf;
6419         struct btrfs_extent_item *ei;
6420         struct btrfs_extent_inline_ref *iref;
6421         int ret;
6422         int is_data;
6423         int extent_slot = 0;
6424         int found_extent = 0;
6425         int num_to_del = 1;
6426         u32 item_size;
6427         u64 refs;
6428         u64 bytenr = node->bytenr;
6429         u64 num_bytes = node->num_bytes;
6430         int last_ref = 0;
6431         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6432                                                  SKINNY_METADATA);
6433
6434         path = btrfs_alloc_path();
6435         if (!path)
6436                 return -ENOMEM;
6437
6438         path->reada = 1;
6439         path->leave_spinning = 1;
6440
6441         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6442         BUG_ON(!is_data && refs_to_drop != 1);
6443
6444         if (is_data)
6445                 skinny_metadata = 0;
6446
6447         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6448                                     bytenr, num_bytes, parent,
6449                                     root_objectid, owner_objectid,
6450                                     owner_offset);
6451         if (ret == 0) {
6452                 extent_slot = path->slots[0];
6453                 while (extent_slot >= 0) {
6454                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6455                                               extent_slot);
6456                         if (key.objectid != bytenr)
6457                                 break;
6458                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6459                             key.offset == num_bytes) {
6460                                 found_extent = 1;
6461                                 break;
6462                         }
6463                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6464                             key.offset == owner_objectid) {
6465                                 found_extent = 1;
6466                                 break;
6467                         }
6468                         if (path->slots[0] - extent_slot > 5)
6469                                 break;
6470                         extent_slot--;
6471                 }
6472 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6473                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6474                 if (found_extent && item_size < sizeof(*ei))
6475                         found_extent = 0;
6476 #endif
6477                 if (!found_extent) {
6478                         BUG_ON(iref);
6479                         ret = remove_extent_backref(trans, extent_root, path,
6480                                                     NULL, refs_to_drop,
6481                                                     is_data, &last_ref);
6482                         if (ret) {
6483                                 btrfs_abort_transaction(trans, extent_root, ret);
6484                                 goto out;
6485                         }
6486                         btrfs_release_path(path);
6487                         path->leave_spinning = 1;
6488
6489                         key.objectid = bytenr;
6490                         key.type = BTRFS_EXTENT_ITEM_KEY;
6491                         key.offset = num_bytes;
6492
6493                         if (!is_data && skinny_metadata) {
6494                                 key.type = BTRFS_METADATA_ITEM_KEY;
6495                                 key.offset = owner_objectid;
6496                         }
6497
6498                         ret = btrfs_search_slot(trans, extent_root,
6499                                                 &key, path, -1, 1);
6500                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6501                                 /*
6502                                  * Couldn't find our skinny metadata item,
6503                                  * see if we have ye olde extent item.
6504                                  */
6505                                 path->slots[0]--;
6506                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6507                                                       path->slots[0]);
6508                                 if (key.objectid == bytenr &&
6509                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6510                                     key.offset == num_bytes)
6511                                         ret = 0;
6512                         }
6513
6514                         if (ret > 0 && skinny_metadata) {
6515                                 skinny_metadata = false;
6516                                 key.objectid = bytenr;
6517                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6518                                 key.offset = num_bytes;
6519                                 btrfs_release_path(path);
6520                                 ret = btrfs_search_slot(trans, extent_root,
6521                                                         &key, path, -1, 1);
6522                         }
6523
6524                         if (ret) {
6525                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6526                                         ret, bytenr);
6527                                 if (ret > 0)
6528                                         btrfs_print_leaf(extent_root,
6529                                                          path->nodes[0]);
6530                         }
6531                         if (ret < 0) {
6532                                 btrfs_abort_transaction(trans, extent_root, ret);
6533                                 goto out;
6534                         }
6535                         extent_slot = path->slots[0];
6536                 }
6537         } else if (WARN_ON(ret == -ENOENT)) {
6538                 btrfs_print_leaf(extent_root, path->nodes[0]);
6539                 btrfs_err(info,
6540                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6541                         bytenr, parent, root_objectid, owner_objectid,
6542                         owner_offset);
6543                 btrfs_abort_transaction(trans, extent_root, ret);
6544                 goto out;
6545         } else {
6546                 btrfs_abort_transaction(trans, extent_root, ret);
6547                 goto out;
6548         }
6549
6550         leaf = path->nodes[0];
6551         item_size = btrfs_item_size_nr(leaf, extent_slot);
6552 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6553         if (item_size < sizeof(*ei)) {
6554                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6555                 ret = convert_extent_item_v0(trans, extent_root, path,
6556                                              owner_objectid, 0);
6557                 if (ret < 0) {
6558                         btrfs_abort_transaction(trans, extent_root, ret);
6559                         goto out;
6560                 }
6561
6562                 btrfs_release_path(path);
6563                 path->leave_spinning = 1;
6564
6565                 key.objectid = bytenr;
6566                 key.type = BTRFS_EXTENT_ITEM_KEY;
6567                 key.offset = num_bytes;
6568
6569                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6570                                         -1, 1);
6571                 if (ret) {
6572                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6573                                 ret, bytenr);
6574                         btrfs_print_leaf(extent_root, path->nodes[0]);
6575                 }
6576                 if (ret < 0) {
6577                         btrfs_abort_transaction(trans, extent_root, ret);
6578                         goto out;
6579                 }
6580
6581                 extent_slot = path->slots[0];
6582                 leaf = path->nodes[0];
6583                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6584         }
6585 #endif
6586         BUG_ON(item_size < sizeof(*ei));
6587         ei = btrfs_item_ptr(leaf, extent_slot,
6588                             struct btrfs_extent_item);
6589         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6590             key.type == BTRFS_EXTENT_ITEM_KEY) {
6591                 struct btrfs_tree_block_info *bi;
6592                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6593                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6594                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6595         }
6596
6597         refs = btrfs_extent_refs(leaf, ei);
6598         if (refs < refs_to_drop) {
6599                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6600                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6601                 ret = -EINVAL;
6602                 btrfs_abort_transaction(trans, extent_root, ret);
6603                 goto out;
6604         }
6605         refs -= refs_to_drop;
6606
6607         if (refs > 0) {
6608                 if (extent_op)
6609                         __run_delayed_extent_op(extent_op, leaf, ei);
6610                 /*
6611                  * In the case of inline back ref, reference count will
6612                  * be updated by remove_extent_backref
6613                  */
6614                 if (iref) {
6615                         BUG_ON(!found_extent);
6616                 } else {
6617                         btrfs_set_extent_refs(leaf, ei, refs);
6618                         btrfs_mark_buffer_dirty(leaf);
6619                 }
6620                 if (found_extent) {
6621                         ret = remove_extent_backref(trans, extent_root, path,
6622                                                     iref, refs_to_drop,
6623                                                     is_data, &last_ref);
6624                         if (ret) {
6625                                 btrfs_abort_transaction(trans, extent_root, ret);
6626                                 goto out;
6627                         }
6628                 }
6629                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6630                                  root_objectid);
6631         } else {
6632                 if (found_extent) {
6633                         BUG_ON(is_data && refs_to_drop !=
6634                                extent_data_ref_count(path, iref));
6635                         if (iref) {
6636                                 BUG_ON(path->slots[0] != extent_slot);
6637                         } else {
6638                                 BUG_ON(path->slots[0] != extent_slot + 1);
6639                                 path->slots[0] = extent_slot;
6640                                 num_to_del = 2;
6641                         }
6642                 }
6643
6644                 last_ref = 1;
6645                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6646                                       num_to_del);
6647                 if (ret) {
6648                         btrfs_abort_transaction(trans, extent_root, ret);
6649                         goto out;
6650                 }
6651                 btrfs_release_path(path);
6652
6653                 if (is_data) {
6654                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6655                         if (ret) {
6656                                 btrfs_abort_transaction(trans, extent_root, ret);
6657                                 goto out;
6658                         }
6659                 }
6660
6661                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6662                 if (ret) {
6663                         btrfs_abort_transaction(trans, extent_root, ret);
6664                         goto out;
6665                 }
6666         }
6667         btrfs_release_path(path);
6668
6669 out:
6670         btrfs_free_path(path);
6671         return ret;
6672 }
6673
6674 /*
6675  * when we free an block, it is possible (and likely) that we free the last
6676  * delayed ref for that extent as well.  This searches the delayed ref tree for
6677  * a given extent, and if there are no other delayed refs to be processed, it
6678  * removes it from the tree.
6679  */
6680 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6681                                       struct btrfs_root *root, u64 bytenr)
6682 {
6683         struct btrfs_delayed_ref_head *head;
6684         struct btrfs_delayed_ref_root *delayed_refs;
6685         int ret = 0;
6686
6687         delayed_refs = &trans->transaction->delayed_refs;
6688         spin_lock(&delayed_refs->lock);
6689         head = btrfs_find_delayed_ref_head(trans, bytenr);
6690         if (!head)
6691                 goto out_delayed_unlock;
6692
6693         spin_lock(&head->lock);
6694         if (!list_empty(&head->ref_list))
6695                 goto out;
6696
6697         if (head->extent_op) {
6698                 if (!head->must_insert_reserved)
6699                         goto out;
6700                 btrfs_free_delayed_extent_op(head->extent_op);
6701                 head->extent_op = NULL;
6702         }
6703
6704         /*
6705          * waiting for the lock here would deadlock.  If someone else has it
6706          * locked they are already in the process of dropping it anyway
6707          */
6708         if (!mutex_trylock(&head->mutex))
6709                 goto out;
6710
6711         /*
6712          * at this point we have a head with no other entries.  Go
6713          * ahead and process it.
6714          */
6715         head->node.in_tree = 0;
6716         rb_erase(&head->href_node, &delayed_refs->href_root);
6717
6718         atomic_dec(&delayed_refs->num_entries);
6719
6720         /*
6721          * we don't take a ref on the node because we're removing it from the
6722          * tree, so we just steal the ref the tree was holding.
6723          */
6724         delayed_refs->num_heads--;
6725         if (head->processing == 0)
6726                 delayed_refs->num_heads_ready--;
6727         head->processing = 0;
6728         spin_unlock(&head->lock);
6729         spin_unlock(&delayed_refs->lock);
6730
6731         BUG_ON(head->extent_op);
6732         if (head->must_insert_reserved)
6733                 ret = 1;
6734
6735         mutex_unlock(&head->mutex);
6736         btrfs_put_delayed_ref(&head->node);
6737         return ret;
6738 out:
6739         spin_unlock(&head->lock);
6740
6741 out_delayed_unlock:
6742         spin_unlock(&delayed_refs->lock);
6743         return 0;
6744 }
6745
6746 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6747                            struct btrfs_root *root,
6748                            struct extent_buffer *buf,
6749                            u64 parent, int last_ref)
6750 {
6751         int pin = 1;
6752         int ret;
6753
6754         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6755                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6756                                         buf->start, buf->len,
6757                                         parent, root->root_key.objectid,
6758                                         btrfs_header_level(buf),
6759                                         BTRFS_DROP_DELAYED_REF, NULL);
6760                 BUG_ON(ret); /* -ENOMEM */
6761         }
6762
6763         if (!last_ref)
6764                 return;
6765
6766         if (btrfs_header_generation(buf) == trans->transid) {
6767                 struct btrfs_block_group_cache *cache;
6768
6769                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6770                         ret = check_ref_cleanup(trans, root, buf->start);
6771                         if (!ret)
6772                                 goto out;
6773                 }
6774
6775                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6776
6777                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6778                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6779                         btrfs_put_block_group(cache);
6780                         goto out;
6781                 }
6782
6783                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6784
6785                 btrfs_add_free_space(cache, buf->start, buf->len);
6786                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6787                 btrfs_put_block_group(cache);
6788                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6789                 pin = 0;
6790         }
6791 out:
6792         if (pin)
6793                 add_pinned_bytes(root->fs_info, buf->len,
6794                                  btrfs_header_level(buf),
6795                                  root->root_key.objectid);
6796
6797         /*
6798          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6799          * anymore.
6800          */
6801         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6802 }
6803
6804 /* Can return -ENOMEM */
6805 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6806                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6807                       u64 owner, u64 offset)
6808 {
6809         int ret;
6810         struct btrfs_fs_info *fs_info = root->fs_info;
6811
6812         if (btrfs_test_is_dummy_root(root))
6813                 return 0;
6814
6815         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6816
6817         /*
6818          * tree log blocks never actually go into the extent allocation
6819          * tree, just update pinning info and exit early.
6820          */
6821         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6822                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6823                 /* unlocks the pinned mutex */
6824                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6825                 ret = 0;
6826         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6827                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6828                                         num_bytes,
6829                                         parent, root_objectid, (int)owner,
6830                                         BTRFS_DROP_DELAYED_REF, NULL);
6831         } else {
6832                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6833                                                 num_bytes,
6834                                                 parent, root_objectid, owner,
6835                                                 offset, 0,
6836                                                 BTRFS_DROP_DELAYED_REF, NULL);
6837         }
6838         return ret;
6839 }
6840
6841 /*
6842  * when we wait for progress in the block group caching, its because
6843  * our allocation attempt failed at least once.  So, we must sleep
6844  * and let some progress happen before we try again.
6845  *
6846  * This function will sleep at least once waiting for new free space to
6847  * show up, and then it will check the block group free space numbers
6848  * for our min num_bytes.  Another option is to have it go ahead
6849  * and look in the rbtree for a free extent of a given size, but this
6850  * is a good start.
6851  *
6852  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6853  * any of the information in this block group.
6854  */
6855 static noinline void
6856 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6857                                 u64 num_bytes)
6858 {
6859         struct btrfs_caching_control *caching_ctl;
6860
6861         caching_ctl = get_caching_control(cache);
6862         if (!caching_ctl)
6863                 return;
6864
6865         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6866                    (cache->free_space_ctl->free_space >= num_bytes));
6867
6868         put_caching_control(caching_ctl);
6869 }
6870
6871 static noinline int
6872 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6873 {
6874         struct btrfs_caching_control *caching_ctl;
6875         int ret = 0;
6876
6877         caching_ctl = get_caching_control(cache);
6878         if (!caching_ctl)
6879                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6880
6881         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6882         if (cache->cached == BTRFS_CACHE_ERROR)
6883                 ret = -EIO;
6884         put_caching_control(caching_ctl);
6885         return ret;
6886 }
6887
6888 int __get_raid_index(u64 flags)
6889 {
6890         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6891                 return BTRFS_RAID_RAID10;
6892         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6893                 return BTRFS_RAID_RAID1;
6894         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6895                 return BTRFS_RAID_DUP;
6896         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6897                 return BTRFS_RAID_RAID0;
6898         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6899                 return BTRFS_RAID_RAID5;
6900         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6901                 return BTRFS_RAID_RAID6;
6902
6903         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6904 }
6905
6906 int get_block_group_index(struct btrfs_block_group_cache *cache)
6907 {
6908         return __get_raid_index(cache->flags);
6909 }
6910
6911 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6912         [BTRFS_RAID_RAID10]     = "raid10",
6913         [BTRFS_RAID_RAID1]      = "raid1",
6914         [BTRFS_RAID_DUP]        = "dup",
6915         [BTRFS_RAID_RAID0]      = "raid0",
6916         [BTRFS_RAID_SINGLE]     = "single",
6917         [BTRFS_RAID_RAID5]      = "raid5",
6918         [BTRFS_RAID_RAID6]      = "raid6",
6919 };
6920
6921 static const char *get_raid_name(enum btrfs_raid_types type)
6922 {
6923         if (type >= BTRFS_NR_RAID_TYPES)
6924                 return NULL;
6925
6926         return btrfs_raid_type_names[type];
6927 }
6928
6929 enum btrfs_loop_type {
6930         LOOP_CACHING_NOWAIT = 0,
6931         LOOP_CACHING_WAIT = 1,
6932         LOOP_ALLOC_CHUNK = 2,
6933         LOOP_NO_EMPTY_SIZE = 3,
6934 };
6935
6936 static inline void
6937 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6938                        int delalloc)
6939 {
6940         if (delalloc)
6941                 down_read(&cache->data_rwsem);
6942 }
6943
6944 static inline void
6945 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6946                        int delalloc)
6947 {
6948         btrfs_get_block_group(cache);
6949         if (delalloc)
6950                 down_read(&cache->data_rwsem);
6951 }
6952
6953 static struct btrfs_block_group_cache *
6954 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6955                    struct btrfs_free_cluster *cluster,
6956                    int delalloc)
6957 {
6958         struct btrfs_block_group_cache *used_bg;
6959         bool locked = false;
6960 again:
6961         spin_lock(&cluster->refill_lock);
6962         if (locked) {
6963                 if (used_bg == cluster->block_group)
6964                         return used_bg;
6965
6966                 up_read(&used_bg->data_rwsem);
6967                 btrfs_put_block_group(used_bg);
6968         }
6969
6970         used_bg = cluster->block_group;
6971         if (!used_bg)
6972                 return NULL;
6973
6974         if (used_bg == block_group)
6975                 return used_bg;
6976
6977         btrfs_get_block_group(used_bg);
6978
6979         if (!delalloc)
6980                 return used_bg;
6981
6982         if (down_read_trylock(&used_bg->data_rwsem))
6983                 return used_bg;
6984
6985         spin_unlock(&cluster->refill_lock);
6986         down_read(&used_bg->data_rwsem);
6987         locked = true;
6988         goto again;
6989 }
6990
6991 static inline void
6992 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6993                          int delalloc)
6994 {
6995         if (delalloc)
6996                 up_read(&cache->data_rwsem);
6997         btrfs_put_block_group(cache);
6998 }
6999
7000 /*
7001  * walks the btree of allocated extents and find a hole of a given size.
7002  * The key ins is changed to record the hole:
7003  * ins->objectid == start position
7004  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7005  * ins->offset == the size of the hole.
7006  * Any available blocks before search_start are skipped.
7007  *
7008  * If there is no suitable free space, we will record the max size of
7009  * the free space extent currently.
7010  */
7011 static noinline int find_free_extent(struct btrfs_root *orig_root,
7012                                      u64 num_bytes, u64 empty_size,
7013                                      u64 hint_byte, struct btrfs_key *ins,
7014                                      u64 flags, int delalloc)
7015 {
7016         int ret = 0;
7017         struct btrfs_root *root = orig_root->fs_info->extent_root;
7018         struct btrfs_free_cluster *last_ptr = NULL;
7019         struct btrfs_block_group_cache *block_group = NULL;
7020         u64 search_start = 0;
7021         u64 max_extent_size = 0;
7022         u64 empty_cluster = 0;
7023         struct btrfs_space_info *space_info;
7024         int loop = 0;
7025         int index = __get_raid_index(flags);
7026         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7027                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7028         bool failed_cluster_refill = false;
7029         bool failed_alloc = false;
7030         bool use_cluster = true;
7031         bool have_caching_bg = false;
7032         bool orig_have_caching_bg = false;
7033         bool full_search = false;
7034
7035         WARN_ON(num_bytes < root->sectorsize);
7036         ins->type = BTRFS_EXTENT_ITEM_KEY;
7037         ins->objectid = 0;
7038         ins->offset = 0;
7039
7040         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7041
7042         space_info = __find_space_info(root->fs_info, flags);
7043         if (!space_info) {
7044                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7045                 return -ENOSPC;
7046         }
7047
7048         /*
7049          * If our free space is heavily fragmented we may not be able to make
7050          * big contiguous allocations, so instead of doing the expensive search
7051          * for free space, simply return ENOSPC with our max_extent_size so we
7052          * can go ahead and search for a more manageable chunk.
7053          *
7054          * If our max_extent_size is large enough for our allocation simply
7055          * disable clustering since we will likely not be able to find enough
7056          * space to create a cluster and induce latency trying.
7057          */
7058         if (unlikely(space_info->max_extent_size)) {
7059                 spin_lock(&space_info->lock);
7060                 if (space_info->max_extent_size &&
7061                     num_bytes > space_info->max_extent_size) {
7062                         ins->offset = space_info->max_extent_size;
7063                         spin_unlock(&space_info->lock);
7064                         return -ENOSPC;
7065                 } else if (space_info->max_extent_size) {
7066                         use_cluster = false;
7067                 }
7068                 spin_unlock(&space_info->lock);
7069         }
7070
7071         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7072         if (last_ptr) {
7073                 spin_lock(&last_ptr->lock);
7074                 if (last_ptr->block_group)
7075                         hint_byte = last_ptr->window_start;
7076                 if (last_ptr->fragmented) {
7077                         /*
7078                          * We still set window_start so we can keep track of the
7079                          * last place we found an allocation to try and save
7080                          * some time.
7081                          */
7082                         hint_byte = last_ptr->window_start;
7083                         use_cluster = false;
7084                 }
7085                 spin_unlock(&last_ptr->lock);
7086         }
7087
7088         search_start = max(search_start, first_logical_byte(root, 0));
7089         search_start = max(search_start, hint_byte);
7090         if (search_start == hint_byte) {
7091                 block_group = btrfs_lookup_block_group(root->fs_info,
7092                                                        search_start);
7093                 /*
7094                  * we don't want to use the block group if it doesn't match our
7095                  * allocation bits, or if its not cached.
7096                  *
7097                  * However if we are re-searching with an ideal block group
7098                  * picked out then we don't care that the block group is cached.
7099                  */
7100                 if (block_group && block_group_bits(block_group, flags) &&
7101                     block_group->cached != BTRFS_CACHE_NO) {
7102                         down_read(&space_info->groups_sem);
7103                         if (list_empty(&block_group->list) ||
7104                             block_group->ro) {
7105                                 /*
7106                                  * someone is removing this block group,
7107                                  * we can't jump into the have_block_group
7108                                  * target because our list pointers are not
7109                                  * valid
7110                                  */
7111                                 btrfs_put_block_group(block_group);
7112                                 up_read(&space_info->groups_sem);
7113                         } else {
7114                                 index = get_block_group_index(block_group);
7115                                 btrfs_lock_block_group(block_group, delalloc);
7116                                 goto have_block_group;
7117                         }
7118                 } else if (block_group) {
7119                         btrfs_put_block_group(block_group);
7120                 }
7121         }
7122 search:
7123         have_caching_bg = false;
7124         if (index == 0 || index == __get_raid_index(flags))
7125                 full_search = true;
7126         down_read(&space_info->groups_sem);
7127         list_for_each_entry(block_group, &space_info->block_groups[index],
7128                             list) {
7129                 u64 offset;
7130                 int cached;
7131
7132                 btrfs_grab_block_group(block_group, delalloc);
7133                 search_start = block_group->key.objectid;
7134
7135                 /*
7136                  * this can happen if we end up cycling through all the
7137                  * raid types, but we want to make sure we only allocate
7138                  * for the proper type.
7139                  */
7140                 if (!block_group_bits(block_group, flags)) {
7141                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
7142                                 BTRFS_BLOCK_GROUP_RAID1 |
7143                                 BTRFS_BLOCK_GROUP_RAID5 |
7144                                 BTRFS_BLOCK_GROUP_RAID6 |
7145                                 BTRFS_BLOCK_GROUP_RAID10;
7146
7147                         /*
7148                          * if they asked for extra copies and this block group
7149                          * doesn't provide them, bail.  This does allow us to
7150                          * fill raid0 from raid1.
7151                          */
7152                         if ((flags & extra) && !(block_group->flags & extra))
7153                                 goto loop;
7154                 }
7155
7156 have_block_group:
7157                 cached = block_group_cache_done(block_group);
7158                 if (unlikely(!cached)) {
7159                         have_caching_bg = true;
7160                         ret = cache_block_group(block_group, 0);
7161                         BUG_ON(ret < 0);
7162                         ret = 0;
7163                 }
7164
7165                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7166                         goto loop;
7167                 if (unlikely(block_group->ro))
7168                         goto loop;
7169
7170                 /*
7171                  * Ok we want to try and use the cluster allocator, so
7172                  * lets look there
7173                  */
7174                 if (last_ptr && use_cluster) {
7175                         struct btrfs_block_group_cache *used_block_group;
7176                         unsigned long aligned_cluster;
7177                         /*
7178                          * the refill lock keeps out other
7179                          * people trying to start a new cluster
7180                          */
7181                         used_block_group = btrfs_lock_cluster(block_group,
7182                                                               last_ptr,
7183                                                               delalloc);
7184                         if (!used_block_group)
7185                                 goto refill_cluster;
7186
7187                         if (used_block_group != block_group &&
7188                             (used_block_group->ro ||
7189                              !block_group_bits(used_block_group, flags)))
7190                                 goto release_cluster;
7191
7192                         offset = btrfs_alloc_from_cluster(used_block_group,
7193                                                 last_ptr,
7194                                                 num_bytes,
7195                                                 used_block_group->key.objectid,
7196                                                 &max_extent_size);
7197                         if (offset) {
7198                                 /* we have a block, we're done */
7199                                 spin_unlock(&last_ptr->refill_lock);
7200                                 trace_btrfs_reserve_extent_cluster(root,
7201                                                 used_block_group,
7202                                                 search_start, num_bytes);
7203                                 if (used_block_group != block_group) {
7204                                         btrfs_release_block_group(block_group,
7205                                                                   delalloc);
7206                                         block_group = used_block_group;
7207                                 }
7208                                 goto checks;
7209                         }
7210
7211                         WARN_ON(last_ptr->block_group != used_block_group);
7212 release_cluster:
7213                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7214                          * set up a new clusters, so lets just skip it
7215                          * and let the allocator find whatever block
7216                          * it can find.  If we reach this point, we
7217                          * will have tried the cluster allocator
7218                          * plenty of times and not have found
7219                          * anything, so we are likely way too
7220                          * fragmented for the clustering stuff to find
7221                          * anything.
7222                          *
7223                          * However, if the cluster is taken from the
7224                          * current block group, release the cluster
7225                          * first, so that we stand a better chance of
7226                          * succeeding in the unclustered
7227                          * allocation.  */
7228                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7229                             used_block_group != block_group) {
7230                                 spin_unlock(&last_ptr->refill_lock);
7231                                 btrfs_release_block_group(used_block_group,
7232                                                           delalloc);
7233                                 goto unclustered_alloc;
7234                         }
7235
7236                         /*
7237                          * this cluster didn't work out, free it and
7238                          * start over
7239                          */
7240                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7241
7242                         if (used_block_group != block_group)
7243                                 btrfs_release_block_group(used_block_group,
7244                                                           delalloc);
7245 refill_cluster:
7246                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7247                                 spin_unlock(&last_ptr->refill_lock);
7248                                 goto unclustered_alloc;
7249                         }
7250
7251                         aligned_cluster = max_t(unsigned long,
7252                                                 empty_cluster + empty_size,
7253                                               block_group->full_stripe_len);
7254
7255                         /* allocate a cluster in this block group */
7256                         ret = btrfs_find_space_cluster(root, block_group,
7257                                                        last_ptr, search_start,
7258                                                        num_bytes,
7259                                                        aligned_cluster);
7260                         if (ret == 0) {
7261                                 /*
7262                                  * now pull our allocation out of this
7263                                  * cluster
7264                                  */
7265                                 offset = btrfs_alloc_from_cluster(block_group,
7266                                                         last_ptr,
7267                                                         num_bytes,
7268                                                         search_start,
7269                                                         &max_extent_size);
7270                                 if (offset) {
7271                                         /* we found one, proceed */
7272                                         spin_unlock(&last_ptr->refill_lock);
7273                                         trace_btrfs_reserve_extent_cluster(root,
7274                                                 block_group, search_start,
7275                                                 num_bytes);
7276                                         goto checks;
7277                                 }
7278                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7279                                    && !failed_cluster_refill) {
7280                                 spin_unlock(&last_ptr->refill_lock);
7281
7282                                 failed_cluster_refill = true;
7283                                 wait_block_group_cache_progress(block_group,
7284                                        num_bytes + empty_cluster + empty_size);
7285                                 goto have_block_group;
7286                         }
7287
7288                         /*
7289                          * at this point we either didn't find a cluster
7290                          * or we weren't able to allocate a block from our
7291                          * cluster.  Free the cluster we've been trying
7292                          * to use, and go to the next block group
7293                          */
7294                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7295                         spin_unlock(&last_ptr->refill_lock);
7296                         goto loop;
7297                 }
7298
7299 unclustered_alloc:
7300                 /*
7301                  * We are doing an unclustered alloc, set the fragmented flag so
7302                  * we don't bother trying to setup a cluster again until we get
7303                  * more space.
7304                  */
7305                 if (unlikely(last_ptr)) {
7306                         spin_lock(&last_ptr->lock);
7307                         last_ptr->fragmented = 1;
7308                         spin_unlock(&last_ptr->lock);
7309                 }
7310                 spin_lock(&block_group->free_space_ctl->tree_lock);
7311                 if (cached &&
7312                     block_group->free_space_ctl->free_space <
7313                     num_bytes + empty_cluster + empty_size) {
7314                         if (block_group->free_space_ctl->free_space >
7315                             max_extent_size)
7316                                 max_extent_size =
7317                                         block_group->free_space_ctl->free_space;
7318                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7319                         goto loop;
7320                 }
7321                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7322
7323                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7324                                                     num_bytes, empty_size,
7325                                                     &max_extent_size);
7326                 /*
7327                  * If we didn't find a chunk, and we haven't failed on this
7328                  * block group before, and this block group is in the middle of
7329                  * caching and we are ok with waiting, then go ahead and wait
7330                  * for progress to be made, and set failed_alloc to true.
7331                  *
7332                  * If failed_alloc is true then we've already waited on this
7333                  * block group once and should move on to the next block group.
7334                  */
7335                 if (!offset && !failed_alloc && !cached &&
7336                     loop > LOOP_CACHING_NOWAIT) {
7337                         wait_block_group_cache_progress(block_group,
7338                                                 num_bytes + empty_size);
7339                         failed_alloc = true;
7340                         goto have_block_group;
7341                 } else if (!offset) {
7342                         goto loop;
7343                 }
7344 checks:
7345                 search_start = ALIGN(offset, root->stripesize);
7346
7347                 /* move on to the next group */
7348                 if (search_start + num_bytes >
7349                     block_group->key.objectid + block_group->key.offset) {
7350                         btrfs_add_free_space(block_group, offset, num_bytes);
7351                         goto loop;
7352                 }
7353
7354                 if (offset < search_start)
7355                         btrfs_add_free_space(block_group, offset,
7356                                              search_start - offset);
7357                 BUG_ON(offset > search_start);
7358
7359                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7360                                                   alloc_type, delalloc);
7361                 if (ret == -EAGAIN) {
7362                         btrfs_add_free_space(block_group, offset, num_bytes);
7363                         goto loop;
7364                 }
7365
7366                 /* we are all good, lets return */
7367                 ins->objectid = search_start;
7368                 ins->offset = num_bytes;
7369
7370                 trace_btrfs_reserve_extent(orig_root, block_group,
7371                                            search_start, num_bytes);
7372                 btrfs_release_block_group(block_group, delalloc);
7373                 break;
7374 loop:
7375                 failed_cluster_refill = false;
7376                 failed_alloc = false;
7377                 BUG_ON(index != get_block_group_index(block_group));
7378                 btrfs_release_block_group(block_group, delalloc);
7379         }
7380         up_read(&space_info->groups_sem);
7381
7382         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7383                 && !orig_have_caching_bg)
7384                 orig_have_caching_bg = true;
7385
7386         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7387                 goto search;
7388
7389         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7390                 goto search;
7391
7392         /*
7393          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7394          *                      caching kthreads as we move along
7395          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7396          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7397          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7398          *                      again
7399          */
7400         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7401                 index = 0;
7402                 if (loop == LOOP_CACHING_NOWAIT) {
7403                         /*
7404                          * We want to skip the LOOP_CACHING_WAIT step if we
7405                          * don't have any unached bgs and we've alrelady done a
7406                          * full search through.
7407                          */
7408                         if (orig_have_caching_bg || !full_search)
7409                                 loop = LOOP_CACHING_WAIT;
7410                         else
7411                                 loop = LOOP_ALLOC_CHUNK;
7412                 } else {
7413                         loop++;
7414                 }
7415
7416                 if (loop == LOOP_ALLOC_CHUNK) {
7417                         struct btrfs_trans_handle *trans;
7418                         int exist = 0;
7419
7420                         trans = current->journal_info;
7421                         if (trans)
7422                                 exist = 1;
7423                         else
7424                                 trans = btrfs_join_transaction(root);
7425
7426                         if (IS_ERR(trans)) {
7427                                 ret = PTR_ERR(trans);
7428                                 goto out;
7429                         }
7430
7431                         ret = do_chunk_alloc(trans, root, flags,
7432                                              CHUNK_ALLOC_FORCE);
7433
7434                         /*
7435                          * If we can't allocate a new chunk we've already looped
7436                          * through at least once, move on to the NO_EMPTY_SIZE
7437                          * case.
7438                          */
7439                         if (ret == -ENOSPC)
7440                                 loop = LOOP_NO_EMPTY_SIZE;
7441
7442                         /*
7443                          * Do not bail out on ENOSPC since we
7444                          * can do more things.
7445                          */
7446                         if (ret < 0 && ret != -ENOSPC)
7447                                 btrfs_abort_transaction(trans,
7448                                                         root, ret);
7449                         else
7450                                 ret = 0;
7451                         if (!exist)
7452                                 btrfs_end_transaction(trans, root);
7453                         if (ret)
7454                                 goto out;
7455                 }
7456
7457                 if (loop == LOOP_NO_EMPTY_SIZE) {
7458                         /*
7459                          * Don't loop again if we already have no empty_size and
7460                          * no empty_cluster.
7461                          */
7462                         if (empty_size == 0 &&
7463                             empty_cluster == 0) {
7464                                 ret = -ENOSPC;
7465                                 goto out;
7466                         }
7467                         empty_size = 0;
7468                         empty_cluster = 0;
7469                 }
7470
7471                 goto search;
7472         } else if (!ins->objectid) {
7473                 ret = -ENOSPC;
7474         } else if (ins->objectid) {
7475                 if (!use_cluster && last_ptr) {
7476                         spin_lock(&last_ptr->lock);
7477                         last_ptr->window_start = ins->objectid;
7478                         spin_unlock(&last_ptr->lock);
7479                 }
7480                 ret = 0;
7481         }
7482 out:
7483         if (ret == -ENOSPC) {
7484                 spin_lock(&space_info->lock);
7485                 space_info->max_extent_size = max_extent_size;
7486                 spin_unlock(&space_info->lock);
7487                 ins->offset = max_extent_size;
7488         }
7489         return ret;
7490 }
7491
7492 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7493                             int dump_block_groups)
7494 {
7495         struct btrfs_block_group_cache *cache;
7496         int index = 0;
7497
7498         spin_lock(&info->lock);
7499         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7500                info->flags,
7501                info->total_bytes - info->bytes_used - info->bytes_pinned -
7502                info->bytes_reserved - info->bytes_readonly,
7503                (info->full) ? "" : "not ");
7504         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7505                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7506                info->total_bytes, info->bytes_used, info->bytes_pinned,
7507                info->bytes_reserved, info->bytes_may_use,
7508                info->bytes_readonly);
7509         spin_unlock(&info->lock);
7510
7511         if (!dump_block_groups)
7512                 return;
7513
7514         down_read(&info->groups_sem);
7515 again:
7516         list_for_each_entry(cache, &info->block_groups[index], list) {
7517                 spin_lock(&cache->lock);
7518                 printk(KERN_INFO "BTRFS: "
7519                            "block group %llu has %llu bytes, "
7520                            "%llu used %llu pinned %llu reserved %s\n",
7521                        cache->key.objectid, cache->key.offset,
7522                        btrfs_block_group_used(&cache->item), cache->pinned,
7523                        cache->reserved, cache->ro ? "[readonly]" : "");
7524                 btrfs_dump_free_space(cache, bytes);
7525                 spin_unlock(&cache->lock);
7526         }
7527         if (++index < BTRFS_NR_RAID_TYPES)
7528                 goto again;
7529         up_read(&info->groups_sem);
7530 }
7531
7532 int btrfs_reserve_extent(struct btrfs_root *root,
7533                          u64 num_bytes, u64 min_alloc_size,
7534                          u64 empty_size, u64 hint_byte,
7535                          struct btrfs_key *ins, int is_data, int delalloc)
7536 {
7537         bool final_tried = num_bytes == min_alloc_size;
7538         u64 flags;
7539         int ret;
7540
7541         flags = btrfs_get_alloc_profile(root, is_data);
7542 again:
7543         WARN_ON(num_bytes < root->sectorsize);
7544         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7545                                flags, delalloc);
7546
7547         if (ret == -ENOSPC) {
7548                 if (!final_tried && ins->offset) {
7549                         num_bytes = min(num_bytes >> 1, ins->offset);
7550                         num_bytes = round_down(num_bytes, root->sectorsize);
7551                         num_bytes = max(num_bytes, min_alloc_size);
7552                         if (num_bytes == min_alloc_size)
7553                                 final_tried = true;
7554                         goto again;
7555                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7556                         struct btrfs_space_info *sinfo;
7557
7558                         sinfo = __find_space_info(root->fs_info, flags);
7559                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7560                                 flags, num_bytes);
7561                         if (sinfo)
7562                                 dump_space_info(sinfo, num_bytes, 1);
7563                 }
7564         }
7565
7566         return ret;
7567 }
7568
7569 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7570                                         u64 start, u64 len,
7571                                         int pin, int delalloc)
7572 {
7573         struct btrfs_block_group_cache *cache;
7574         int ret = 0;
7575
7576         cache = btrfs_lookup_block_group(root->fs_info, start);
7577         if (!cache) {
7578                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7579                         start);
7580                 return -ENOSPC;
7581         }
7582
7583         if (pin)
7584                 pin_down_extent(root, cache, start, len, 1);
7585         else {
7586                 if (btrfs_test_opt(root, DISCARD))
7587                         ret = btrfs_discard_extent(root, start, len, NULL);
7588                 btrfs_add_free_space(cache, start, len);
7589                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7590         }
7591
7592         btrfs_put_block_group(cache);
7593
7594         trace_btrfs_reserved_extent_free(root, start, len);
7595
7596         return ret;
7597 }
7598
7599 int btrfs_free_reserved_extent(struct btrfs_root *root,
7600                                u64 start, u64 len, int delalloc)
7601 {
7602         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7603 }
7604
7605 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7606                                        u64 start, u64 len)
7607 {
7608         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7609 }
7610
7611 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7612                                       struct btrfs_root *root,
7613                                       u64 parent, u64 root_objectid,
7614                                       u64 flags, u64 owner, u64 offset,
7615                                       struct btrfs_key *ins, int ref_mod)
7616 {
7617         int ret;
7618         struct btrfs_fs_info *fs_info = root->fs_info;
7619         struct btrfs_extent_item *extent_item;
7620         struct btrfs_extent_inline_ref *iref;
7621         struct btrfs_path *path;
7622         struct extent_buffer *leaf;
7623         int type;
7624         u32 size;
7625
7626         if (parent > 0)
7627                 type = BTRFS_SHARED_DATA_REF_KEY;
7628         else
7629                 type = BTRFS_EXTENT_DATA_REF_KEY;
7630
7631         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7632
7633         path = btrfs_alloc_path();
7634         if (!path)
7635                 return -ENOMEM;
7636
7637         path->leave_spinning = 1;
7638         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7639                                       ins, size);
7640         if (ret) {
7641                 btrfs_free_path(path);
7642                 return ret;
7643         }
7644
7645         leaf = path->nodes[0];
7646         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7647                                      struct btrfs_extent_item);
7648         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7649         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7650         btrfs_set_extent_flags(leaf, extent_item,
7651                                flags | BTRFS_EXTENT_FLAG_DATA);
7652
7653         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7654         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7655         if (parent > 0) {
7656                 struct btrfs_shared_data_ref *ref;
7657                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7658                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7659                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7660         } else {
7661                 struct btrfs_extent_data_ref *ref;
7662                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7663                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7664                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7665                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7666                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7667         }
7668
7669         btrfs_mark_buffer_dirty(path->nodes[0]);
7670         btrfs_free_path(path);
7671
7672         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7673         if (ret) { /* -ENOENT, logic error */
7674                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7675                         ins->objectid, ins->offset);
7676                 BUG();
7677         }
7678         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7679         return ret;
7680 }
7681
7682 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7683                                      struct btrfs_root *root,
7684                                      u64 parent, u64 root_objectid,
7685                                      u64 flags, struct btrfs_disk_key *key,
7686                                      int level, struct btrfs_key *ins)
7687 {
7688         int ret;
7689         struct btrfs_fs_info *fs_info = root->fs_info;
7690         struct btrfs_extent_item *extent_item;
7691         struct btrfs_tree_block_info *block_info;
7692         struct btrfs_extent_inline_ref *iref;
7693         struct btrfs_path *path;
7694         struct extent_buffer *leaf;
7695         u32 size = sizeof(*extent_item) + sizeof(*iref);
7696         u64 num_bytes = ins->offset;
7697         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7698                                                  SKINNY_METADATA);
7699
7700         if (!skinny_metadata)
7701                 size += sizeof(*block_info);
7702
7703         path = btrfs_alloc_path();
7704         if (!path) {
7705                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7706                                                    root->nodesize);
7707                 return -ENOMEM;
7708         }
7709
7710         path->leave_spinning = 1;
7711         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7712                                       ins, size);
7713         if (ret) {
7714                 btrfs_free_path(path);
7715                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7716                                                    root->nodesize);
7717                 return ret;
7718         }
7719
7720         leaf = path->nodes[0];
7721         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7722                                      struct btrfs_extent_item);
7723         btrfs_set_extent_refs(leaf, extent_item, 1);
7724         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7725         btrfs_set_extent_flags(leaf, extent_item,
7726                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7727
7728         if (skinny_metadata) {
7729                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7730                 num_bytes = root->nodesize;
7731         } else {
7732                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7733                 btrfs_set_tree_block_key(leaf, block_info, key);
7734                 btrfs_set_tree_block_level(leaf, block_info, level);
7735                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7736         }
7737
7738         if (parent > 0) {
7739                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7740                 btrfs_set_extent_inline_ref_type(leaf, iref,
7741                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7742                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7743         } else {
7744                 btrfs_set_extent_inline_ref_type(leaf, iref,
7745                                                  BTRFS_TREE_BLOCK_REF_KEY);
7746                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7747         }
7748
7749         btrfs_mark_buffer_dirty(leaf);
7750         btrfs_free_path(path);
7751
7752         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7753                                  1);
7754         if (ret) { /* -ENOENT, logic error */
7755                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7756                         ins->objectid, ins->offset);
7757                 BUG();
7758         }
7759
7760         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7761         return ret;
7762 }
7763
7764 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7765                                      struct btrfs_root *root,
7766                                      u64 root_objectid, u64 owner,
7767                                      u64 offset, u64 ram_bytes,
7768                                      struct btrfs_key *ins)
7769 {
7770         int ret;
7771
7772         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7773
7774         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7775                                          ins->offset, 0,
7776                                          root_objectid, owner, offset,
7777                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7778                                          NULL);
7779         return ret;
7780 }
7781
7782 /*
7783  * this is used by the tree logging recovery code.  It records that
7784  * an extent has been allocated and makes sure to clear the free
7785  * space cache bits as well
7786  */
7787 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7788                                    struct btrfs_root *root,
7789                                    u64 root_objectid, u64 owner, u64 offset,
7790                                    struct btrfs_key *ins)
7791 {
7792         int ret;
7793         struct btrfs_block_group_cache *block_group;
7794
7795         /*
7796          * Mixed block groups will exclude before processing the log so we only
7797          * need to do the exlude dance if this fs isn't mixed.
7798          */
7799         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7800                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7801                 if (ret)
7802                         return ret;
7803         }
7804
7805         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7806         if (!block_group)
7807                 return -EINVAL;
7808
7809         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7810                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7811         BUG_ON(ret); /* logic error */
7812         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7813                                          0, owner, offset, ins, 1);
7814         btrfs_put_block_group(block_group);
7815         return ret;
7816 }
7817
7818 static struct extent_buffer *
7819 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7820                       u64 bytenr, int level)
7821 {
7822         struct extent_buffer *buf;
7823
7824         buf = btrfs_find_create_tree_block(root, bytenr);
7825         if (!buf)
7826                 return ERR_PTR(-ENOMEM);
7827         btrfs_set_header_generation(buf, trans->transid);
7828         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7829         btrfs_tree_lock(buf);
7830         clean_tree_block(trans, root->fs_info, buf);
7831         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7832
7833         btrfs_set_lock_blocking(buf);
7834         btrfs_set_buffer_uptodate(buf);
7835
7836         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7837                 buf->log_index = root->log_transid % 2;
7838                 /*
7839                  * we allow two log transactions at a time, use different
7840                  * EXENT bit to differentiate dirty pages.
7841                  */
7842                 if (buf->log_index == 0)
7843                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7844                                         buf->start + buf->len - 1, GFP_NOFS);
7845                 else
7846                         set_extent_new(&root->dirty_log_pages, buf->start,
7847                                         buf->start + buf->len - 1, GFP_NOFS);
7848         } else {
7849                 buf->log_index = -1;
7850                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7851                          buf->start + buf->len - 1, GFP_NOFS);
7852         }
7853         trans->blocks_used++;
7854         /* this returns a buffer locked for blocking */
7855         return buf;
7856 }
7857
7858 static struct btrfs_block_rsv *
7859 use_block_rsv(struct btrfs_trans_handle *trans,
7860               struct btrfs_root *root, u32 blocksize)
7861 {
7862         struct btrfs_block_rsv *block_rsv;
7863         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7864         int ret;
7865         bool global_updated = false;
7866
7867         block_rsv = get_block_rsv(trans, root);
7868
7869         if (unlikely(block_rsv->size == 0))
7870                 goto try_reserve;
7871 again:
7872         ret = block_rsv_use_bytes(block_rsv, blocksize);
7873         if (!ret)
7874                 return block_rsv;
7875
7876         if (block_rsv->failfast)
7877                 return ERR_PTR(ret);
7878
7879         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7880                 global_updated = true;
7881                 update_global_block_rsv(root->fs_info);
7882                 goto again;
7883         }
7884
7885         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7886                 static DEFINE_RATELIMIT_STATE(_rs,
7887                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7888                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7889                 if (__ratelimit(&_rs))
7890                         WARN(1, KERN_DEBUG
7891                                 "BTRFS: block rsv returned %d\n", ret);
7892         }
7893 try_reserve:
7894         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7895                                      BTRFS_RESERVE_NO_FLUSH);
7896         if (!ret)
7897                 return block_rsv;
7898         /*
7899          * If we couldn't reserve metadata bytes try and use some from
7900          * the global reserve if its space type is the same as the global
7901          * reservation.
7902          */
7903         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7904             block_rsv->space_info == global_rsv->space_info) {
7905                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7906                 if (!ret)
7907                         return global_rsv;
7908         }
7909         return ERR_PTR(ret);
7910 }
7911
7912 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7913                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7914 {
7915         block_rsv_add_bytes(block_rsv, blocksize, 0);
7916         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7917 }
7918
7919 /*
7920  * finds a free extent and does all the dirty work required for allocation
7921  * returns the tree buffer or an ERR_PTR on error.
7922  */
7923 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7924                                         struct btrfs_root *root,
7925                                         u64 parent, u64 root_objectid,
7926                                         struct btrfs_disk_key *key, int level,
7927                                         u64 hint, u64 empty_size)
7928 {
7929         struct btrfs_key ins;
7930         struct btrfs_block_rsv *block_rsv;
7931         struct extent_buffer *buf;
7932         struct btrfs_delayed_extent_op *extent_op;
7933         u64 flags = 0;
7934         int ret;
7935         u32 blocksize = root->nodesize;
7936         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7937                                                  SKINNY_METADATA);
7938
7939         if (btrfs_test_is_dummy_root(root)) {
7940                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7941                                             level);
7942                 if (!IS_ERR(buf))
7943                         root->alloc_bytenr += blocksize;
7944                 return buf;
7945         }
7946
7947         block_rsv = use_block_rsv(trans, root, blocksize);
7948         if (IS_ERR(block_rsv))
7949                 return ERR_CAST(block_rsv);
7950
7951         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7952                                    empty_size, hint, &ins, 0, 0);
7953         if (ret)
7954                 goto out_unuse;
7955
7956         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7957         if (IS_ERR(buf)) {
7958                 ret = PTR_ERR(buf);
7959                 goto out_free_reserved;
7960         }
7961
7962         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7963                 if (parent == 0)
7964                         parent = ins.objectid;
7965                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7966         } else
7967                 BUG_ON(parent > 0);
7968
7969         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7970                 extent_op = btrfs_alloc_delayed_extent_op();
7971                 if (!extent_op) {
7972                         ret = -ENOMEM;
7973                         goto out_free_buf;
7974                 }
7975                 if (key)
7976                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7977                 else
7978                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7979                 extent_op->flags_to_set = flags;
7980                 if (skinny_metadata)
7981                         extent_op->update_key = 0;
7982                 else
7983                         extent_op->update_key = 1;
7984                 extent_op->update_flags = 1;
7985                 extent_op->is_data = 0;
7986                 extent_op->level = level;
7987
7988                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7989                                                  ins.objectid, ins.offset,
7990                                                  parent, root_objectid, level,
7991                                                  BTRFS_ADD_DELAYED_EXTENT,
7992                                                  extent_op);
7993                 if (ret)
7994                         goto out_free_delayed;
7995         }
7996         return buf;
7997
7998 out_free_delayed:
7999         btrfs_free_delayed_extent_op(extent_op);
8000 out_free_buf:
8001         free_extent_buffer(buf);
8002 out_free_reserved:
8003         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8004 out_unuse:
8005         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8006         return ERR_PTR(ret);
8007 }
8008
8009 struct walk_control {
8010         u64 refs[BTRFS_MAX_LEVEL];
8011         u64 flags[BTRFS_MAX_LEVEL];
8012         struct btrfs_key update_progress;
8013         int stage;
8014         int level;
8015         int shared_level;
8016         int update_ref;
8017         int keep_locks;
8018         int reada_slot;
8019         int reada_count;
8020         int for_reloc;
8021 };
8022
8023 #define DROP_REFERENCE  1
8024 #define UPDATE_BACKREF  2
8025
8026 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8027                                      struct btrfs_root *root,
8028                                      struct walk_control *wc,
8029                                      struct btrfs_path *path)
8030 {
8031         u64 bytenr;
8032         u64 generation;
8033         u64 refs;
8034         u64 flags;
8035         u32 nritems;
8036         u32 blocksize;
8037         struct btrfs_key key;
8038         struct extent_buffer *eb;
8039         int ret;
8040         int slot;
8041         int nread = 0;
8042
8043         if (path->slots[wc->level] < wc->reada_slot) {
8044                 wc->reada_count = wc->reada_count * 2 / 3;
8045                 wc->reada_count = max(wc->reada_count, 2);
8046         } else {
8047                 wc->reada_count = wc->reada_count * 3 / 2;
8048                 wc->reada_count = min_t(int, wc->reada_count,
8049                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8050         }
8051
8052         eb = path->nodes[wc->level];
8053         nritems = btrfs_header_nritems(eb);
8054         blocksize = root->nodesize;
8055
8056         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8057                 if (nread >= wc->reada_count)
8058                         break;
8059
8060                 cond_resched();
8061                 bytenr = btrfs_node_blockptr(eb, slot);
8062                 generation = btrfs_node_ptr_generation(eb, slot);
8063
8064                 if (slot == path->slots[wc->level])
8065                         goto reada;
8066
8067                 if (wc->stage == UPDATE_BACKREF &&
8068                     generation <= root->root_key.offset)
8069                         continue;
8070
8071                 /* We don't lock the tree block, it's OK to be racy here */
8072                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8073                                                wc->level - 1, 1, &refs,
8074                                                &flags);
8075                 /* We don't care about errors in readahead. */
8076                 if (ret < 0)
8077                         continue;
8078                 BUG_ON(refs == 0);
8079
8080                 if (wc->stage == DROP_REFERENCE) {
8081                         if (refs == 1)
8082                                 goto reada;
8083
8084                         if (wc->level == 1 &&
8085                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8086                                 continue;
8087                         if (!wc->update_ref ||
8088                             generation <= root->root_key.offset)
8089                                 continue;
8090                         btrfs_node_key_to_cpu(eb, &key, slot);
8091                         ret = btrfs_comp_cpu_keys(&key,
8092                                                   &wc->update_progress);
8093                         if (ret < 0)
8094                                 continue;
8095                 } else {
8096                         if (wc->level == 1 &&
8097                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8098                                 continue;
8099                 }
8100 reada:
8101                 readahead_tree_block(root, bytenr);
8102                 nread++;
8103         }
8104         wc->reada_slot = slot;
8105 }
8106
8107 /*
8108  * These may not be seen by the usual inc/dec ref code so we have to
8109  * add them here.
8110  */
8111 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8112                                      struct btrfs_root *root, u64 bytenr,
8113                                      u64 num_bytes)
8114 {
8115         struct btrfs_qgroup_extent_record *qrecord;
8116         struct btrfs_delayed_ref_root *delayed_refs;
8117
8118         qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8119         if (!qrecord)
8120                 return -ENOMEM;
8121
8122         qrecord->bytenr = bytenr;
8123         qrecord->num_bytes = num_bytes;
8124         qrecord->old_roots = NULL;
8125
8126         delayed_refs = &trans->transaction->delayed_refs;
8127         spin_lock(&delayed_refs->lock);
8128         if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8129                 kfree(qrecord);
8130         spin_unlock(&delayed_refs->lock);
8131
8132         return 0;
8133 }
8134
8135 static int account_leaf_items(struct btrfs_trans_handle *trans,
8136                               struct btrfs_root *root,
8137                               struct extent_buffer *eb)
8138 {
8139         int nr = btrfs_header_nritems(eb);
8140         int i, extent_type, ret;
8141         struct btrfs_key key;
8142         struct btrfs_file_extent_item *fi;
8143         u64 bytenr, num_bytes;
8144
8145         /* We can be called directly from walk_up_proc() */
8146         if (!root->fs_info->quota_enabled)
8147                 return 0;
8148
8149         for (i = 0; i < nr; i++) {
8150                 btrfs_item_key_to_cpu(eb, &key, i);
8151
8152                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8153                         continue;
8154
8155                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8156                 /* filter out non qgroup-accountable extents  */
8157                 extent_type = btrfs_file_extent_type(eb, fi);
8158
8159                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8160                         continue;
8161
8162                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8163                 if (!bytenr)
8164                         continue;
8165
8166                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8167
8168                 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8169                 if (ret)
8170                         return ret;
8171         }
8172         return 0;
8173 }
8174
8175 /*
8176  * Walk up the tree from the bottom, freeing leaves and any interior
8177  * nodes which have had all slots visited. If a node (leaf or
8178  * interior) is freed, the node above it will have it's slot
8179  * incremented. The root node will never be freed.
8180  *
8181  * At the end of this function, we should have a path which has all
8182  * slots incremented to the next position for a search. If we need to
8183  * read a new node it will be NULL and the node above it will have the
8184  * correct slot selected for a later read.
8185  *
8186  * If we increment the root nodes slot counter past the number of
8187  * elements, 1 is returned to signal completion of the search.
8188  */
8189 static int adjust_slots_upwards(struct btrfs_root *root,
8190                                 struct btrfs_path *path, int root_level)
8191 {
8192         int level = 0;
8193         int nr, slot;
8194         struct extent_buffer *eb;
8195
8196         if (root_level == 0)
8197                 return 1;
8198
8199         while (level <= root_level) {
8200                 eb = path->nodes[level];
8201                 nr = btrfs_header_nritems(eb);
8202                 path->slots[level]++;
8203                 slot = path->slots[level];
8204                 if (slot >= nr || level == 0) {
8205                         /*
8206                          * Don't free the root -  we will detect this
8207                          * condition after our loop and return a
8208                          * positive value for caller to stop walking the tree.
8209                          */
8210                         if (level != root_level) {
8211                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8212                                 path->locks[level] = 0;
8213
8214                                 free_extent_buffer(eb);
8215                                 path->nodes[level] = NULL;
8216                                 path->slots[level] = 0;
8217                         }
8218                 } else {
8219                         /*
8220                          * We have a valid slot to walk back down
8221                          * from. Stop here so caller can process these
8222                          * new nodes.
8223                          */
8224                         break;
8225                 }
8226
8227                 level++;
8228         }
8229
8230         eb = path->nodes[root_level];
8231         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8232                 return 1;
8233
8234         return 0;
8235 }
8236
8237 /*
8238  * root_eb is the subtree root and is locked before this function is called.
8239  */
8240 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8241                                   struct btrfs_root *root,
8242                                   struct extent_buffer *root_eb,
8243                                   u64 root_gen,
8244                                   int root_level)
8245 {
8246         int ret = 0;
8247         int level;
8248         struct extent_buffer *eb = root_eb;
8249         struct btrfs_path *path = NULL;
8250
8251         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8252         BUG_ON(root_eb == NULL);
8253
8254         if (!root->fs_info->quota_enabled)
8255                 return 0;
8256
8257         if (!extent_buffer_uptodate(root_eb)) {
8258                 ret = btrfs_read_buffer(root_eb, root_gen);
8259                 if (ret)
8260                         goto out;
8261         }
8262
8263         if (root_level == 0) {
8264                 ret = account_leaf_items(trans, root, root_eb);
8265                 goto out;
8266         }
8267
8268         path = btrfs_alloc_path();
8269         if (!path)
8270                 return -ENOMEM;
8271
8272         /*
8273          * Walk down the tree.  Missing extent blocks are filled in as
8274          * we go. Metadata is accounted every time we read a new
8275          * extent block.
8276          *
8277          * When we reach a leaf, we account for file extent items in it,
8278          * walk back up the tree (adjusting slot pointers as we go)
8279          * and restart the search process.
8280          */
8281         extent_buffer_get(root_eb); /* For path */
8282         path->nodes[root_level] = root_eb;
8283         path->slots[root_level] = 0;
8284         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8285 walk_down:
8286         level = root_level;
8287         while (level >= 0) {
8288                 if (path->nodes[level] == NULL) {
8289                         int parent_slot;
8290                         u64 child_gen;
8291                         u64 child_bytenr;
8292
8293                         /* We need to get child blockptr/gen from
8294                          * parent before we can read it. */
8295                         eb = path->nodes[level + 1];
8296                         parent_slot = path->slots[level + 1];
8297                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8298                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8299
8300                         eb = read_tree_block(root, child_bytenr, child_gen);
8301                         if (IS_ERR(eb)) {
8302                                 ret = PTR_ERR(eb);
8303                                 goto out;
8304                         } else if (!extent_buffer_uptodate(eb)) {
8305                                 free_extent_buffer(eb);
8306                                 ret = -EIO;
8307                                 goto out;
8308                         }
8309
8310                         path->nodes[level] = eb;
8311                         path->slots[level] = 0;
8312
8313                         btrfs_tree_read_lock(eb);
8314                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8315                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8316
8317                         ret = record_one_subtree_extent(trans, root, child_bytenr,
8318                                                         root->nodesize);
8319                         if (ret)
8320                                 goto out;
8321                 }
8322
8323                 if (level == 0) {
8324                         ret = account_leaf_items(trans, root, path->nodes[level]);
8325                         if (ret)
8326                                 goto out;
8327
8328                         /* Nonzero return here means we completed our search */
8329                         ret = adjust_slots_upwards(root, path, root_level);
8330                         if (ret)
8331                                 break;
8332
8333                         /* Restart search with new slots */
8334                         goto walk_down;
8335                 }
8336
8337                 level--;
8338         }
8339
8340         ret = 0;
8341 out:
8342         btrfs_free_path(path);
8343
8344         return ret;
8345 }
8346
8347 /*
8348  * helper to process tree block while walking down the tree.
8349  *
8350  * when wc->stage == UPDATE_BACKREF, this function updates
8351  * back refs for pointers in the block.
8352  *
8353  * NOTE: return value 1 means we should stop walking down.
8354  */
8355 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8356                                    struct btrfs_root *root,
8357                                    struct btrfs_path *path,
8358                                    struct walk_control *wc, int lookup_info)
8359 {
8360         int level = wc->level;
8361         struct extent_buffer *eb = path->nodes[level];
8362         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8363         int ret;
8364
8365         if (wc->stage == UPDATE_BACKREF &&
8366             btrfs_header_owner(eb) != root->root_key.objectid)
8367                 return 1;
8368
8369         /*
8370          * when reference count of tree block is 1, it won't increase
8371          * again. once full backref flag is set, we never clear it.
8372          */
8373         if (lookup_info &&
8374             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8375              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8376                 BUG_ON(!path->locks[level]);
8377                 ret = btrfs_lookup_extent_info(trans, root,
8378                                                eb->start, level, 1,
8379                                                &wc->refs[level],
8380                                                &wc->flags[level]);
8381                 BUG_ON(ret == -ENOMEM);
8382                 if (ret)
8383                         return ret;
8384                 BUG_ON(wc->refs[level] == 0);
8385         }
8386
8387         if (wc->stage == DROP_REFERENCE) {
8388                 if (wc->refs[level] > 1)
8389                         return 1;
8390
8391                 if (path->locks[level] && !wc->keep_locks) {
8392                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8393                         path->locks[level] = 0;
8394                 }
8395                 return 0;
8396         }
8397
8398         /* wc->stage == UPDATE_BACKREF */
8399         if (!(wc->flags[level] & flag)) {
8400                 BUG_ON(!path->locks[level]);
8401                 ret = btrfs_inc_ref(trans, root, eb, 1);
8402                 BUG_ON(ret); /* -ENOMEM */
8403                 ret = btrfs_dec_ref(trans, root, eb, 0);
8404                 BUG_ON(ret); /* -ENOMEM */
8405                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8406                                                   eb->len, flag,
8407                                                   btrfs_header_level(eb), 0);
8408                 BUG_ON(ret); /* -ENOMEM */
8409                 wc->flags[level] |= flag;
8410         }
8411
8412         /*
8413          * the block is shared by multiple trees, so it's not good to
8414          * keep the tree lock
8415          */
8416         if (path->locks[level] && level > 0) {
8417                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8418                 path->locks[level] = 0;
8419         }
8420         return 0;
8421 }
8422
8423 /*
8424  * helper to process tree block pointer.
8425  *
8426  * when wc->stage == DROP_REFERENCE, this function checks
8427  * reference count of the block pointed to. if the block
8428  * is shared and we need update back refs for the subtree
8429  * rooted at the block, this function changes wc->stage to
8430  * UPDATE_BACKREF. if the block is shared and there is no
8431  * need to update back, this function drops the reference
8432  * to the block.
8433  *
8434  * NOTE: return value 1 means we should stop walking down.
8435  */
8436 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8437                                  struct btrfs_root *root,
8438                                  struct btrfs_path *path,
8439                                  struct walk_control *wc, int *lookup_info)
8440 {
8441         u64 bytenr;
8442         u64 generation;
8443         u64 parent;
8444         u32 blocksize;
8445         struct btrfs_key key;
8446         struct extent_buffer *next;
8447         int level = wc->level;
8448         int reada = 0;
8449         int ret = 0;
8450         bool need_account = false;
8451
8452         generation = btrfs_node_ptr_generation(path->nodes[level],
8453                                                path->slots[level]);
8454         /*
8455          * if the lower level block was created before the snapshot
8456          * was created, we know there is no need to update back refs
8457          * for the subtree
8458          */
8459         if (wc->stage == UPDATE_BACKREF &&
8460             generation <= root->root_key.offset) {
8461                 *lookup_info = 1;
8462                 return 1;
8463         }
8464
8465         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8466         blocksize = root->nodesize;
8467
8468         next = btrfs_find_tree_block(root->fs_info, bytenr);
8469         if (!next) {
8470                 next = btrfs_find_create_tree_block(root, bytenr);
8471                 if (!next)
8472                         return -ENOMEM;
8473                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8474                                                level - 1);
8475                 reada = 1;
8476         }
8477         btrfs_tree_lock(next);
8478         btrfs_set_lock_blocking(next);
8479
8480         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8481                                        &wc->refs[level - 1],
8482                                        &wc->flags[level - 1]);
8483         if (ret < 0) {
8484                 btrfs_tree_unlock(next);
8485                 return ret;
8486         }
8487
8488         if (unlikely(wc->refs[level - 1] == 0)) {
8489                 btrfs_err(root->fs_info, "Missing references.");
8490                 BUG();
8491         }
8492         *lookup_info = 0;
8493
8494         if (wc->stage == DROP_REFERENCE) {
8495                 if (wc->refs[level - 1] > 1) {
8496                         need_account = true;
8497                         if (level == 1 &&
8498                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8499                                 goto skip;
8500
8501                         if (!wc->update_ref ||
8502                             generation <= root->root_key.offset)
8503                                 goto skip;
8504
8505                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8506                                               path->slots[level]);
8507                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8508                         if (ret < 0)
8509                                 goto skip;
8510
8511                         wc->stage = UPDATE_BACKREF;
8512                         wc->shared_level = level - 1;
8513                 }
8514         } else {
8515                 if (level == 1 &&
8516                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8517                         goto skip;
8518         }
8519
8520         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8521                 btrfs_tree_unlock(next);
8522                 free_extent_buffer(next);
8523                 next = NULL;
8524                 *lookup_info = 1;
8525         }
8526
8527         if (!next) {
8528                 if (reada && level == 1)
8529                         reada_walk_down(trans, root, wc, path);
8530                 next = read_tree_block(root, bytenr, generation);
8531                 if (IS_ERR(next)) {
8532                         return PTR_ERR(next);
8533                 } else if (!extent_buffer_uptodate(next)) {
8534                         free_extent_buffer(next);
8535                         return -EIO;
8536                 }
8537                 btrfs_tree_lock(next);
8538                 btrfs_set_lock_blocking(next);
8539         }
8540
8541         level--;
8542         BUG_ON(level != btrfs_header_level(next));
8543         path->nodes[level] = next;
8544         path->slots[level] = 0;
8545         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8546         wc->level = level;
8547         if (wc->level == 1)
8548                 wc->reada_slot = 0;
8549         return 0;
8550 skip:
8551         wc->refs[level - 1] = 0;
8552         wc->flags[level - 1] = 0;
8553         if (wc->stage == DROP_REFERENCE) {
8554                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8555                         parent = path->nodes[level]->start;
8556                 } else {
8557                         BUG_ON(root->root_key.objectid !=
8558                                btrfs_header_owner(path->nodes[level]));
8559                         parent = 0;
8560                 }
8561
8562                 if (need_account) {
8563                         ret = account_shared_subtree(trans, root, next,
8564                                                      generation, level - 1);
8565                         if (ret) {
8566                                 btrfs_err_rl(root->fs_info,
8567                                         "Error "
8568                                         "%d accounting shared subtree. Quota "
8569                                         "is out of sync, rescan required.",
8570                                         ret);
8571                         }
8572                 }
8573                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8574                                 root->root_key.objectid, level - 1, 0);
8575                 BUG_ON(ret); /* -ENOMEM */
8576         }
8577         btrfs_tree_unlock(next);
8578         free_extent_buffer(next);
8579         *lookup_info = 1;
8580         return 1;
8581 }
8582
8583 /*
8584  * helper to process tree block while walking up the tree.
8585  *
8586  * when wc->stage == DROP_REFERENCE, this function drops
8587  * reference count on the block.
8588  *
8589  * when wc->stage == UPDATE_BACKREF, this function changes
8590  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8591  * to UPDATE_BACKREF previously while processing the block.
8592  *
8593  * NOTE: return value 1 means we should stop walking up.
8594  */
8595 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8596                                  struct btrfs_root *root,
8597                                  struct btrfs_path *path,
8598                                  struct walk_control *wc)
8599 {
8600         int ret;
8601         int level = wc->level;
8602         struct extent_buffer *eb = path->nodes[level];
8603         u64 parent = 0;
8604
8605         if (wc->stage == UPDATE_BACKREF) {
8606                 BUG_ON(wc->shared_level < level);
8607                 if (level < wc->shared_level)
8608                         goto out;
8609
8610                 ret = find_next_key(path, level + 1, &wc->update_progress);
8611                 if (ret > 0)
8612                         wc->update_ref = 0;
8613
8614                 wc->stage = DROP_REFERENCE;
8615                 wc->shared_level = -1;
8616                 path->slots[level] = 0;
8617
8618                 /*
8619                  * check reference count again if the block isn't locked.
8620                  * we should start walking down the tree again if reference
8621                  * count is one.
8622                  */
8623                 if (!path->locks[level]) {
8624                         BUG_ON(level == 0);
8625                         btrfs_tree_lock(eb);
8626                         btrfs_set_lock_blocking(eb);
8627                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8628
8629                         ret = btrfs_lookup_extent_info(trans, root,
8630                                                        eb->start, level, 1,
8631                                                        &wc->refs[level],
8632                                                        &wc->flags[level]);
8633                         if (ret < 0) {
8634                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8635                                 path->locks[level] = 0;
8636                                 return ret;
8637                         }
8638                         BUG_ON(wc->refs[level] == 0);
8639                         if (wc->refs[level] == 1) {
8640                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8641                                 path->locks[level] = 0;
8642                                 return 1;
8643                         }
8644                 }
8645         }
8646
8647         /* wc->stage == DROP_REFERENCE */
8648         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8649
8650         if (wc->refs[level] == 1) {
8651                 if (level == 0) {
8652                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8653                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8654                         else
8655                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8656                         BUG_ON(ret); /* -ENOMEM */
8657                         ret = account_leaf_items(trans, root, eb);
8658                         if (ret) {
8659                                 btrfs_err_rl(root->fs_info,
8660                                         "error "
8661                                         "%d accounting leaf items. Quota "
8662                                         "is out of sync, rescan required.",
8663                                         ret);
8664                         }
8665                 }
8666                 /* make block locked assertion in clean_tree_block happy */
8667                 if (!path->locks[level] &&
8668                     btrfs_header_generation(eb) == trans->transid) {
8669                         btrfs_tree_lock(eb);
8670                         btrfs_set_lock_blocking(eb);
8671                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8672                 }
8673                 clean_tree_block(trans, root->fs_info, eb);
8674         }
8675
8676         if (eb == root->node) {
8677                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8678                         parent = eb->start;
8679                 else
8680                         BUG_ON(root->root_key.objectid !=
8681                                btrfs_header_owner(eb));
8682         } else {
8683                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8684                         parent = path->nodes[level + 1]->start;
8685                 else
8686                         BUG_ON(root->root_key.objectid !=
8687                                btrfs_header_owner(path->nodes[level + 1]));
8688         }
8689
8690         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8691 out:
8692         wc->refs[level] = 0;
8693         wc->flags[level] = 0;
8694         return 0;
8695 }
8696
8697 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8698                                    struct btrfs_root *root,
8699                                    struct btrfs_path *path,
8700                                    struct walk_control *wc)
8701 {
8702         int level = wc->level;
8703         int lookup_info = 1;
8704         int ret;
8705
8706         while (level >= 0) {
8707                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8708                 if (ret > 0)
8709                         break;
8710
8711                 if (level == 0)
8712                         break;
8713
8714                 if (path->slots[level] >=
8715                     btrfs_header_nritems(path->nodes[level]))
8716                         break;
8717
8718                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8719                 if (ret > 0) {
8720                         path->slots[level]++;
8721                         continue;
8722                 } else if (ret < 0)
8723                         return ret;
8724                 level = wc->level;
8725         }
8726         return 0;
8727 }
8728
8729 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8730                                  struct btrfs_root *root,
8731                                  struct btrfs_path *path,
8732                                  struct walk_control *wc, int max_level)
8733 {
8734         int level = wc->level;
8735         int ret;
8736
8737         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8738         while (level < max_level && path->nodes[level]) {
8739                 wc->level = level;
8740                 if (path->slots[level] + 1 <
8741                     btrfs_header_nritems(path->nodes[level])) {
8742                         path->slots[level]++;
8743                         return 0;
8744                 } else {
8745                         ret = walk_up_proc(trans, root, path, wc);
8746                         if (ret > 0)
8747                                 return 0;
8748
8749                         if (path->locks[level]) {
8750                                 btrfs_tree_unlock_rw(path->nodes[level],
8751                                                      path->locks[level]);
8752                                 path->locks[level] = 0;
8753                         }
8754                         free_extent_buffer(path->nodes[level]);
8755                         path->nodes[level] = NULL;
8756                         level++;
8757                 }
8758         }
8759         return 1;
8760 }
8761
8762 /*
8763  * drop a subvolume tree.
8764  *
8765  * this function traverses the tree freeing any blocks that only
8766  * referenced by the tree.
8767  *
8768  * when a shared tree block is found. this function decreases its
8769  * reference count by one. if update_ref is true, this function
8770  * also make sure backrefs for the shared block and all lower level
8771  * blocks are properly updated.
8772  *
8773  * If called with for_reloc == 0, may exit early with -EAGAIN
8774  */
8775 int btrfs_drop_snapshot(struct btrfs_root *root,
8776                          struct btrfs_block_rsv *block_rsv, int update_ref,
8777                          int for_reloc)
8778 {
8779         struct btrfs_path *path;
8780         struct btrfs_trans_handle *trans;
8781         struct btrfs_root *tree_root = root->fs_info->tree_root;
8782         struct btrfs_root_item *root_item = &root->root_item;
8783         struct walk_control *wc;
8784         struct btrfs_key key;
8785         int err = 0;
8786         int ret;
8787         int level;
8788         bool root_dropped = false;
8789
8790         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8791
8792         path = btrfs_alloc_path();
8793         if (!path) {
8794                 err = -ENOMEM;
8795                 goto out;
8796         }
8797
8798         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8799         if (!wc) {
8800                 btrfs_free_path(path);
8801                 err = -ENOMEM;
8802                 goto out;
8803         }
8804
8805         trans = btrfs_start_transaction(tree_root, 0);
8806         if (IS_ERR(trans)) {
8807                 err = PTR_ERR(trans);
8808                 goto out_free;
8809         }
8810
8811         if (block_rsv)
8812                 trans->block_rsv = block_rsv;
8813
8814         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8815                 level = btrfs_header_level(root->node);
8816                 path->nodes[level] = btrfs_lock_root_node(root);
8817                 btrfs_set_lock_blocking(path->nodes[level]);
8818                 path->slots[level] = 0;
8819                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8820                 memset(&wc->update_progress, 0,
8821                        sizeof(wc->update_progress));
8822         } else {
8823                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8824                 memcpy(&wc->update_progress, &key,
8825                        sizeof(wc->update_progress));
8826
8827                 level = root_item->drop_level;
8828                 BUG_ON(level == 0);
8829                 path->lowest_level = level;
8830                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8831                 path->lowest_level = 0;
8832                 if (ret < 0) {
8833                         err = ret;
8834                         goto out_end_trans;
8835                 }
8836                 WARN_ON(ret > 0);
8837
8838                 /*
8839                  * unlock our path, this is safe because only this
8840                  * function is allowed to delete this snapshot
8841                  */
8842                 btrfs_unlock_up_safe(path, 0);
8843
8844                 level = btrfs_header_level(root->node);
8845                 while (1) {
8846                         btrfs_tree_lock(path->nodes[level]);
8847                         btrfs_set_lock_blocking(path->nodes[level]);
8848                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8849
8850                         ret = btrfs_lookup_extent_info(trans, root,
8851                                                 path->nodes[level]->start,
8852                                                 level, 1, &wc->refs[level],
8853                                                 &wc->flags[level]);
8854                         if (ret < 0) {
8855                                 err = ret;
8856                                 goto out_end_trans;
8857                         }
8858                         BUG_ON(wc->refs[level] == 0);
8859
8860                         if (level == root_item->drop_level)
8861                                 break;
8862
8863                         btrfs_tree_unlock(path->nodes[level]);
8864                         path->locks[level] = 0;
8865                         WARN_ON(wc->refs[level] != 1);
8866                         level--;
8867                 }
8868         }
8869
8870         wc->level = level;
8871         wc->shared_level = -1;
8872         wc->stage = DROP_REFERENCE;
8873         wc->update_ref = update_ref;
8874         wc->keep_locks = 0;
8875         wc->for_reloc = for_reloc;
8876         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8877
8878         while (1) {
8879
8880                 ret = walk_down_tree(trans, root, path, wc);
8881                 if (ret < 0) {
8882                         err = ret;
8883                         break;
8884                 }
8885
8886                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8887                 if (ret < 0) {
8888                         err = ret;
8889                         break;
8890                 }
8891
8892                 if (ret > 0) {
8893                         BUG_ON(wc->stage != DROP_REFERENCE);
8894                         break;
8895                 }
8896
8897                 if (wc->stage == DROP_REFERENCE) {
8898                         level = wc->level;
8899                         btrfs_node_key(path->nodes[level],
8900                                        &root_item->drop_progress,
8901                                        path->slots[level]);
8902                         root_item->drop_level = level;
8903                 }
8904
8905                 BUG_ON(wc->level == 0);
8906                 if (btrfs_should_end_transaction(trans, tree_root) ||
8907                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8908                         ret = btrfs_update_root(trans, tree_root,
8909                                                 &root->root_key,
8910                                                 root_item);
8911                         if (ret) {
8912                                 btrfs_abort_transaction(trans, tree_root, ret);
8913                                 err = ret;
8914                                 goto out_end_trans;
8915                         }
8916
8917                         btrfs_end_transaction_throttle(trans, tree_root);
8918                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8919                                 pr_debug("BTRFS: drop snapshot early exit\n");
8920                                 err = -EAGAIN;
8921                                 goto out_free;
8922                         }
8923
8924                         trans = btrfs_start_transaction(tree_root, 0);
8925                         if (IS_ERR(trans)) {
8926                                 err = PTR_ERR(trans);
8927                                 goto out_free;
8928                         }
8929                         if (block_rsv)
8930                                 trans->block_rsv = block_rsv;
8931                 }
8932         }
8933         btrfs_release_path(path);
8934         if (err)
8935                 goto out_end_trans;
8936
8937         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8938         if (ret) {
8939                 btrfs_abort_transaction(trans, tree_root, ret);
8940                 goto out_end_trans;
8941         }
8942
8943         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8944                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8945                                       NULL, NULL);
8946                 if (ret < 0) {
8947                         btrfs_abort_transaction(trans, tree_root, ret);
8948                         err = ret;
8949                         goto out_end_trans;
8950                 } else if (ret > 0) {
8951                         /* if we fail to delete the orphan item this time
8952                          * around, it'll get picked up the next time.
8953                          *
8954                          * The most common failure here is just -ENOENT.
8955                          */
8956                         btrfs_del_orphan_item(trans, tree_root,
8957                                               root->root_key.objectid);
8958                 }
8959         }
8960
8961         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8962                 btrfs_add_dropped_root(trans, root);
8963         } else {
8964                 free_extent_buffer(root->node);
8965                 free_extent_buffer(root->commit_root);
8966                 btrfs_put_fs_root(root);
8967         }
8968         root_dropped = true;
8969 out_end_trans:
8970         btrfs_end_transaction_throttle(trans, tree_root);
8971 out_free:
8972         kfree(wc);
8973         btrfs_free_path(path);
8974 out:
8975         /*
8976          * So if we need to stop dropping the snapshot for whatever reason we
8977          * need to make sure to add it back to the dead root list so that we
8978          * keep trying to do the work later.  This also cleans up roots if we
8979          * don't have it in the radix (like when we recover after a power fail
8980          * or unmount) so we don't leak memory.
8981          */
8982         if (!for_reloc && root_dropped == false)
8983                 btrfs_add_dead_root(root);
8984         if (err && err != -EAGAIN)
8985                 btrfs_std_error(root->fs_info, err, NULL);
8986         return err;
8987 }
8988
8989 /*
8990  * drop subtree rooted at tree block 'node'.
8991  *
8992  * NOTE: this function will unlock and release tree block 'node'
8993  * only used by relocation code
8994  */
8995 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8996                         struct btrfs_root *root,
8997                         struct extent_buffer *node,
8998                         struct extent_buffer *parent)
8999 {
9000         struct btrfs_path *path;
9001         struct walk_control *wc;
9002         int level;
9003         int parent_level;
9004         int ret = 0;
9005         int wret;
9006
9007         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9008
9009         path = btrfs_alloc_path();
9010         if (!path)
9011                 return -ENOMEM;
9012
9013         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9014         if (!wc) {
9015                 btrfs_free_path(path);
9016                 return -ENOMEM;
9017         }
9018
9019         btrfs_assert_tree_locked(parent);
9020         parent_level = btrfs_header_level(parent);
9021         extent_buffer_get(parent);
9022         path->nodes[parent_level] = parent;
9023         path->slots[parent_level] = btrfs_header_nritems(parent);
9024
9025         btrfs_assert_tree_locked(node);
9026         level = btrfs_header_level(node);
9027         path->nodes[level] = node;
9028         path->slots[level] = 0;
9029         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9030
9031         wc->refs[parent_level] = 1;
9032         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9033         wc->level = level;
9034         wc->shared_level = -1;
9035         wc->stage = DROP_REFERENCE;
9036         wc->update_ref = 0;
9037         wc->keep_locks = 1;
9038         wc->for_reloc = 1;
9039         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9040
9041         while (1) {
9042                 wret = walk_down_tree(trans, root, path, wc);
9043                 if (wret < 0) {
9044                         ret = wret;
9045                         break;
9046                 }
9047
9048                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9049                 if (wret < 0)
9050                         ret = wret;
9051                 if (wret != 0)
9052                         break;
9053         }
9054
9055         kfree(wc);
9056         btrfs_free_path(path);
9057         return ret;
9058 }
9059
9060 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9061 {
9062         u64 num_devices;
9063         u64 stripped;
9064
9065         /*
9066          * if restripe for this chunk_type is on pick target profile and
9067          * return, otherwise do the usual balance
9068          */
9069         stripped = get_restripe_target(root->fs_info, flags);
9070         if (stripped)
9071                 return extended_to_chunk(stripped);
9072
9073         num_devices = root->fs_info->fs_devices->rw_devices;
9074
9075         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9076                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9077                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9078
9079         if (num_devices == 1) {
9080                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9081                 stripped = flags & ~stripped;
9082
9083                 /* turn raid0 into single device chunks */
9084                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9085                         return stripped;
9086
9087                 /* turn mirroring into duplication */
9088                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9089                              BTRFS_BLOCK_GROUP_RAID10))
9090                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9091         } else {
9092                 /* they already had raid on here, just return */
9093                 if (flags & stripped)
9094                         return flags;
9095
9096                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9097                 stripped = flags & ~stripped;
9098
9099                 /* switch duplicated blocks with raid1 */
9100                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9101                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9102
9103                 /* this is drive concat, leave it alone */
9104         }
9105
9106         return flags;
9107 }
9108
9109 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9110 {
9111         struct btrfs_space_info *sinfo = cache->space_info;
9112         u64 num_bytes;
9113         u64 min_allocable_bytes;
9114         int ret = -ENOSPC;
9115
9116         /*
9117          * We need some metadata space and system metadata space for
9118          * allocating chunks in some corner cases until we force to set
9119          * it to be readonly.
9120          */
9121         if ((sinfo->flags &
9122              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9123             !force)
9124                 min_allocable_bytes = SZ_1M;
9125         else
9126                 min_allocable_bytes = 0;
9127
9128         spin_lock(&sinfo->lock);
9129         spin_lock(&cache->lock);
9130
9131         if (cache->ro) {
9132                 cache->ro++;
9133                 ret = 0;
9134                 goto out;
9135         }
9136
9137         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9138                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9139
9140         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9141             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9142             min_allocable_bytes <= sinfo->total_bytes) {
9143                 sinfo->bytes_readonly += num_bytes;
9144                 cache->ro++;
9145                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9146                 ret = 0;
9147         }
9148 out:
9149         spin_unlock(&cache->lock);
9150         spin_unlock(&sinfo->lock);
9151         return ret;
9152 }
9153
9154 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9155                              struct btrfs_block_group_cache *cache)
9156
9157 {
9158         struct btrfs_trans_handle *trans;
9159         u64 alloc_flags;
9160         int ret;
9161
9162 again:
9163         trans = btrfs_join_transaction(root);
9164         if (IS_ERR(trans))
9165                 return PTR_ERR(trans);
9166
9167         /*
9168          * we're not allowed to set block groups readonly after the dirty
9169          * block groups cache has started writing.  If it already started,
9170          * back off and let this transaction commit
9171          */
9172         mutex_lock(&root->fs_info->ro_block_group_mutex);
9173         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9174                 u64 transid = trans->transid;
9175
9176                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9177                 btrfs_end_transaction(trans, root);
9178
9179                 ret = btrfs_wait_for_commit(root, transid);
9180                 if (ret)
9181                         return ret;
9182                 goto again;
9183         }
9184
9185         /*
9186          * if we are changing raid levels, try to allocate a corresponding
9187          * block group with the new raid level.
9188          */
9189         alloc_flags = update_block_group_flags(root, cache->flags);
9190         if (alloc_flags != cache->flags) {
9191                 ret = do_chunk_alloc(trans, root, alloc_flags,
9192                                      CHUNK_ALLOC_FORCE);
9193                 /*
9194                  * ENOSPC is allowed here, we may have enough space
9195                  * already allocated at the new raid level to
9196                  * carry on
9197                  */
9198                 if (ret == -ENOSPC)
9199                         ret = 0;
9200                 if (ret < 0)
9201                         goto out;
9202         }
9203
9204         ret = inc_block_group_ro(cache, 0);
9205         if (!ret)
9206                 goto out;
9207         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9208         ret = do_chunk_alloc(trans, root, alloc_flags,
9209                              CHUNK_ALLOC_FORCE);
9210         if (ret < 0)
9211                 goto out;
9212         ret = inc_block_group_ro(cache, 0);
9213 out:
9214         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9215                 alloc_flags = update_block_group_flags(root, cache->flags);
9216                 lock_chunks(root->fs_info->chunk_root);
9217                 check_system_chunk(trans, root, alloc_flags);
9218                 unlock_chunks(root->fs_info->chunk_root);
9219         }
9220         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9221
9222         btrfs_end_transaction(trans, root);
9223         return ret;
9224 }
9225
9226 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9227                             struct btrfs_root *root, u64 type)
9228 {
9229         u64 alloc_flags = get_alloc_profile(root, type);
9230         return do_chunk_alloc(trans, root, alloc_flags,
9231                               CHUNK_ALLOC_FORCE);
9232 }
9233
9234 /*
9235  * helper to account the unused space of all the readonly block group in the
9236  * space_info. takes mirrors into account.
9237  */
9238 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9239 {
9240         struct btrfs_block_group_cache *block_group;
9241         u64 free_bytes = 0;
9242         int factor;
9243
9244         /* It's df, we don't care if it's racey */
9245         if (list_empty(&sinfo->ro_bgs))
9246                 return 0;
9247
9248         spin_lock(&sinfo->lock);
9249         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9250                 spin_lock(&block_group->lock);
9251
9252                 if (!block_group->ro) {
9253                         spin_unlock(&block_group->lock);
9254                         continue;
9255                 }
9256
9257                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9258                                           BTRFS_BLOCK_GROUP_RAID10 |
9259                                           BTRFS_BLOCK_GROUP_DUP))
9260                         factor = 2;
9261                 else
9262                         factor = 1;
9263
9264                 free_bytes += (block_group->key.offset -
9265                                btrfs_block_group_used(&block_group->item)) *
9266                                factor;
9267
9268                 spin_unlock(&block_group->lock);
9269         }
9270         spin_unlock(&sinfo->lock);
9271
9272         return free_bytes;
9273 }
9274
9275 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9276                               struct btrfs_block_group_cache *cache)
9277 {
9278         struct btrfs_space_info *sinfo = cache->space_info;
9279         u64 num_bytes;
9280
9281         BUG_ON(!cache->ro);
9282
9283         spin_lock(&sinfo->lock);
9284         spin_lock(&cache->lock);
9285         if (!--cache->ro) {
9286                 num_bytes = cache->key.offset - cache->reserved -
9287                             cache->pinned - cache->bytes_super -
9288                             btrfs_block_group_used(&cache->item);
9289                 sinfo->bytes_readonly -= num_bytes;
9290                 list_del_init(&cache->ro_list);
9291         }
9292         spin_unlock(&cache->lock);
9293         spin_unlock(&sinfo->lock);
9294 }
9295
9296 /*
9297  * checks to see if its even possible to relocate this block group.
9298  *
9299  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
9300  * ok to go ahead and try.
9301  */
9302 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9303 {
9304         struct btrfs_block_group_cache *block_group;
9305         struct btrfs_space_info *space_info;
9306         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9307         struct btrfs_device *device;
9308         struct btrfs_trans_handle *trans;
9309         u64 min_free;
9310         u64 dev_min = 1;
9311         u64 dev_nr = 0;
9312         u64 target;
9313         int index;
9314         int full = 0;
9315         int ret = 0;
9316
9317         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9318
9319         /* odd, couldn't find the block group, leave it alone */
9320         if (!block_group)
9321                 return -1;
9322
9323         min_free = btrfs_block_group_used(&block_group->item);
9324
9325         /* no bytes used, we're good */
9326         if (!min_free)
9327                 goto out;
9328
9329         space_info = block_group->space_info;
9330         spin_lock(&space_info->lock);
9331
9332         full = space_info->full;
9333
9334         /*
9335          * if this is the last block group we have in this space, we can't
9336          * relocate it unless we're able to allocate a new chunk below.
9337          *
9338          * Otherwise, we need to make sure we have room in the space to handle
9339          * all of the extents from this block group.  If we can, we're good
9340          */
9341         if ((space_info->total_bytes != block_group->key.offset) &&
9342             (space_info->bytes_used + space_info->bytes_reserved +
9343              space_info->bytes_pinned + space_info->bytes_readonly +
9344              min_free < space_info->total_bytes)) {
9345                 spin_unlock(&space_info->lock);
9346                 goto out;
9347         }
9348         spin_unlock(&space_info->lock);
9349
9350         /*
9351          * ok we don't have enough space, but maybe we have free space on our
9352          * devices to allocate new chunks for relocation, so loop through our
9353          * alloc devices and guess if we have enough space.  if this block
9354          * group is going to be restriped, run checks against the target
9355          * profile instead of the current one.
9356          */
9357         ret = -1;
9358
9359         /*
9360          * index:
9361          *      0: raid10
9362          *      1: raid1
9363          *      2: dup
9364          *      3: raid0
9365          *      4: single
9366          */
9367         target = get_restripe_target(root->fs_info, block_group->flags);
9368         if (target) {
9369                 index = __get_raid_index(extended_to_chunk(target));
9370         } else {
9371                 /*
9372                  * this is just a balance, so if we were marked as full
9373                  * we know there is no space for a new chunk
9374                  */
9375                 if (full)
9376                         goto out;
9377
9378                 index = get_block_group_index(block_group);
9379         }
9380
9381         if (index == BTRFS_RAID_RAID10) {
9382                 dev_min = 4;
9383                 /* Divide by 2 */
9384                 min_free >>= 1;
9385         } else if (index == BTRFS_RAID_RAID1) {
9386                 dev_min = 2;
9387         } else if (index == BTRFS_RAID_DUP) {
9388                 /* Multiply by 2 */
9389                 min_free <<= 1;
9390         } else if (index == BTRFS_RAID_RAID0) {
9391                 dev_min = fs_devices->rw_devices;
9392                 min_free = div64_u64(min_free, dev_min);
9393         }
9394
9395         /* We need to do this so that we can look at pending chunks */
9396         trans = btrfs_join_transaction(root);
9397         if (IS_ERR(trans)) {
9398                 ret = PTR_ERR(trans);
9399                 goto out;
9400         }
9401
9402         mutex_lock(&root->fs_info->chunk_mutex);
9403         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9404                 u64 dev_offset;
9405
9406                 /*
9407                  * check to make sure we can actually find a chunk with enough
9408                  * space to fit our block group in.
9409                  */
9410                 if (device->total_bytes > device->bytes_used + min_free &&
9411                     !device->is_tgtdev_for_dev_replace) {
9412                         ret = find_free_dev_extent(trans, device, min_free,
9413                                                    &dev_offset, NULL);
9414                         if (!ret)
9415                                 dev_nr++;
9416
9417                         if (dev_nr >= dev_min)
9418                                 break;
9419
9420                         ret = -1;
9421                 }
9422         }
9423         mutex_unlock(&root->fs_info->chunk_mutex);
9424         btrfs_end_transaction(trans, root);
9425 out:
9426         btrfs_put_block_group(block_group);
9427         return ret;
9428 }
9429
9430 static int find_first_block_group(struct btrfs_root *root,
9431                 struct btrfs_path *path, struct btrfs_key *key)
9432 {
9433         int ret = 0;
9434         struct btrfs_key found_key;
9435         struct extent_buffer *leaf;
9436         int slot;
9437
9438         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9439         if (ret < 0)
9440                 goto out;
9441
9442         while (1) {
9443                 slot = path->slots[0];
9444                 leaf = path->nodes[0];
9445                 if (slot >= btrfs_header_nritems(leaf)) {
9446                         ret = btrfs_next_leaf(root, path);
9447                         if (ret == 0)
9448                                 continue;
9449                         if (ret < 0)
9450                                 goto out;
9451                         break;
9452                 }
9453                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9454
9455                 if (found_key.objectid >= key->objectid &&
9456                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9457                         ret = 0;
9458                         goto out;
9459                 }
9460                 path->slots[0]++;
9461         }
9462 out:
9463         return ret;
9464 }
9465
9466 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9467 {
9468         struct btrfs_block_group_cache *block_group;
9469         u64 last = 0;
9470
9471         while (1) {
9472                 struct inode *inode;
9473
9474                 block_group = btrfs_lookup_first_block_group(info, last);
9475                 while (block_group) {
9476                         spin_lock(&block_group->lock);
9477                         if (block_group->iref)
9478                                 break;
9479                         spin_unlock(&block_group->lock);
9480                         block_group = next_block_group(info->tree_root,
9481                                                        block_group);
9482                 }
9483                 if (!block_group) {
9484                         if (last == 0)
9485                                 break;
9486                         last = 0;
9487                         continue;
9488                 }
9489
9490                 inode = block_group->inode;
9491                 block_group->iref = 0;
9492                 block_group->inode = NULL;
9493                 spin_unlock(&block_group->lock);
9494                 iput(inode);
9495                 last = block_group->key.objectid + block_group->key.offset;
9496                 btrfs_put_block_group(block_group);
9497         }
9498 }
9499
9500 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9501 {
9502         struct btrfs_block_group_cache *block_group;
9503         struct btrfs_space_info *space_info;
9504         struct btrfs_caching_control *caching_ctl;
9505         struct rb_node *n;
9506
9507         down_write(&info->commit_root_sem);
9508         while (!list_empty(&info->caching_block_groups)) {
9509                 caching_ctl = list_entry(info->caching_block_groups.next,
9510                                          struct btrfs_caching_control, list);
9511                 list_del(&caching_ctl->list);
9512                 put_caching_control(caching_ctl);
9513         }
9514         up_write(&info->commit_root_sem);
9515
9516         spin_lock(&info->unused_bgs_lock);
9517         while (!list_empty(&info->unused_bgs)) {
9518                 block_group = list_first_entry(&info->unused_bgs,
9519                                                struct btrfs_block_group_cache,
9520                                                bg_list);
9521                 list_del_init(&block_group->bg_list);
9522                 btrfs_put_block_group(block_group);
9523         }
9524         spin_unlock(&info->unused_bgs_lock);
9525
9526         spin_lock(&info->block_group_cache_lock);
9527         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9528                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9529                                        cache_node);
9530                 rb_erase(&block_group->cache_node,
9531                          &info->block_group_cache_tree);
9532                 RB_CLEAR_NODE(&block_group->cache_node);
9533                 spin_unlock(&info->block_group_cache_lock);
9534
9535                 down_write(&block_group->space_info->groups_sem);
9536                 list_del(&block_group->list);
9537                 up_write(&block_group->space_info->groups_sem);
9538
9539                 if (block_group->cached == BTRFS_CACHE_STARTED)
9540                         wait_block_group_cache_done(block_group);
9541
9542                 /*
9543                  * We haven't cached this block group, which means we could
9544                  * possibly have excluded extents on this block group.
9545                  */
9546                 if (block_group->cached == BTRFS_CACHE_NO ||
9547                     block_group->cached == BTRFS_CACHE_ERROR)
9548                         free_excluded_extents(info->extent_root, block_group);
9549
9550                 btrfs_remove_free_space_cache(block_group);
9551                 btrfs_put_block_group(block_group);
9552
9553                 spin_lock(&info->block_group_cache_lock);
9554         }
9555         spin_unlock(&info->block_group_cache_lock);
9556
9557         /* now that all the block groups are freed, go through and
9558          * free all the space_info structs.  This is only called during
9559          * the final stages of unmount, and so we know nobody is
9560          * using them.  We call synchronize_rcu() once before we start,
9561          * just to be on the safe side.
9562          */
9563         synchronize_rcu();
9564
9565         release_global_block_rsv(info);
9566
9567         while (!list_empty(&info->space_info)) {
9568                 int i;
9569
9570                 space_info = list_entry(info->space_info.next,
9571                                         struct btrfs_space_info,
9572                                         list);
9573                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9574                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9575                             space_info->bytes_reserved > 0 ||
9576                             space_info->bytes_may_use > 0)) {
9577                                 dump_space_info(space_info, 0, 0);
9578                         }
9579                 }
9580                 list_del(&space_info->list);
9581                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9582                         struct kobject *kobj;
9583                         kobj = space_info->block_group_kobjs[i];
9584                         space_info->block_group_kobjs[i] = NULL;
9585                         if (kobj) {
9586                                 kobject_del(kobj);
9587                                 kobject_put(kobj);
9588                         }
9589                 }
9590                 kobject_del(&space_info->kobj);
9591                 kobject_put(&space_info->kobj);
9592         }
9593         return 0;
9594 }
9595
9596 static void __link_block_group(struct btrfs_space_info *space_info,
9597                                struct btrfs_block_group_cache *cache)
9598 {
9599         int index = get_block_group_index(cache);
9600         bool first = false;
9601
9602         down_write(&space_info->groups_sem);
9603         if (list_empty(&space_info->block_groups[index]))
9604                 first = true;
9605         list_add_tail(&cache->list, &space_info->block_groups[index]);
9606         up_write(&space_info->groups_sem);
9607
9608         if (first) {
9609                 struct raid_kobject *rkobj;
9610                 int ret;
9611
9612                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9613                 if (!rkobj)
9614                         goto out_err;
9615                 rkobj->raid_type = index;
9616                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9617                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9618                                   "%s", get_raid_name(index));
9619                 if (ret) {
9620                         kobject_put(&rkobj->kobj);
9621                         goto out_err;
9622                 }
9623                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9624         }
9625
9626         return;
9627 out_err:
9628         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9629 }
9630
9631 static struct btrfs_block_group_cache *
9632 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9633 {
9634         struct btrfs_block_group_cache *cache;
9635
9636         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9637         if (!cache)
9638                 return NULL;
9639
9640         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9641                                         GFP_NOFS);
9642         if (!cache->free_space_ctl) {
9643                 kfree(cache);
9644                 return NULL;
9645         }
9646
9647         cache->key.objectid = start;
9648         cache->key.offset = size;
9649         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9650
9651         cache->sectorsize = root->sectorsize;
9652         cache->fs_info = root->fs_info;
9653         cache->full_stripe_len = btrfs_full_stripe_len(root,
9654                                                &root->fs_info->mapping_tree,
9655                                                start);
9656         atomic_set(&cache->count, 1);
9657         spin_lock_init(&cache->lock);
9658         init_rwsem(&cache->data_rwsem);
9659         INIT_LIST_HEAD(&cache->list);
9660         INIT_LIST_HEAD(&cache->cluster_list);
9661         INIT_LIST_HEAD(&cache->bg_list);
9662         INIT_LIST_HEAD(&cache->ro_list);
9663         INIT_LIST_HEAD(&cache->dirty_list);
9664         INIT_LIST_HEAD(&cache->io_list);
9665         btrfs_init_free_space_ctl(cache);
9666         atomic_set(&cache->trimming, 0);
9667
9668         return cache;
9669 }
9670
9671 int btrfs_read_block_groups(struct btrfs_root *root)
9672 {
9673         struct btrfs_path *path;
9674         int ret;
9675         struct btrfs_block_group_cache *cache;
9676         struct btrfs_fs_info *info = root->fs_info;
9677         struct btrfs_space_info *space_info;
9678         struct btrfs_key key;
9679         struct btrfs_key found_key;
9680         struct extent_buffer *leaf;
9681         int need_clear = 0;
9682         u64 cache_gen;
9683
9684         root = info->extent_root;
9685         key.objectid = 0;
9686         key.offset = 0;
9687         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9688         path = btrfs_alloc_path();
9689         if (!path)
9690                 return -ENOMEM;
9691         path->reada = 1;
9692
9693         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9694         if (btrfs_test_opt(root, SPACE_CACHE) &&
9695             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9696                 need_clear = 1;
9697         if (btrfs_test_opt(root, CLEAR_CACHE))
9698                 need_clear = 1;
9699
9700         while (1) {
9701                 ret = find_first_block_group(root, path, &key);
9702                 if (ret > 0)
9703                         break;
9704                 if (ret != 0)
9705                         goto error;
9706
9707                 leaf = path->nodes[0];
9708                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9709
9710                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9711                                                        found_key.offset);
9712                 if (!cache) {
9713                         ret = -ENOMEM;
9714                         goto error;
9715                 }
9716
9717                 if (need_clear) {
9718                         /*
9719                          * When we mount with old space cache, we need to
9720                          * set BTRFS_DC_CLEAR and set dirty flag.
9721                          *
9722                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9723                          *    truncate the old free space cache inode and
9724                          *    setup a new one.
9725                          * b) Setting 'dirty flag' makes sure that we flush
9726                          *    the new space cache info onto disk.
9727                          */
9728                         if (btrfs_test_opt(root, SPACE_CACHE))
9729                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9730                 }
9731
9732                 read_extent_buffer(leaf, &cache->item,
9733                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9734                                    sizeof(cache->item));
9735                 cache->flags = btrfs_block_group_flags(&cache->item);
9736
9737                 key.objectid = found_key.objectid + found_key.offset;
9738                 btrfs_release_path(path);
9739
9740                 /*
9741                  * We need to exclude the super stripes now so that the space
9742                  * info has super bytes accounted for, otherwise we'll think
9743                  * we have more space than we actually do.
9744                  */
9745                 ret = exclude_super_stripes(root, cache);
9746                 if (ret) {
9747                         /*
9748                          * We may have excluded something, so call this just in
9749                          * case.
9750                          */
9751                         free_excluded_extents(root, cache);
9752                         btrfs_put_block_group(cache);
9753                         goto error;
9754                 }
9755
9756                 /*
9757                  * check for two cases, either we are full, and therefore
9758                  * don't need to bother with the caching work since we won't
9759                  * find any space, or we are empty, and we can just add all
9760                  * the space in and be done with it.  This saves us _alot_ of
9761                  * time, particularly in the full case.
9762                  */
9763                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9764                         cache->last_byte_to_unpin = (u64)-1;
9765                         cache->cached = BTRFS_CACHE_FINISHED;
9766                         free_excluded_extents(root, cache);
9767                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9768                         cache->last_byte_to_unpin = (u64)-1;
9769                         cache->cached = BTRFS_CACHE_FINISHED;
9770                         add_new_free_space(cache, root->fs_info,
9771                                            found_key.objectid,
9772                                            found_key.objectid +
9773                                            found_key.offset);
9774                         free_excluded_extents(root, cache);
9775                 }
9776
9777                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9778                 if (ret) {
9779                         btrfs_remove_free_space_cache(cache);
9780                         btrfs_put_block_group(cache);
9781                         goto error;
9782                 }
9783
9784                 ret = update_space_info(info, cache->flags, found_key.offset,
9785                                         btrfs_block_group_used(&cache->item),
9786                                         &space_info);
9787                 if (ret) {
9788                         btrfs_remove_free_space_cache(cache);
9789                         spin_lock(&info->block_group_cache_lock);
9790                         rb_erase(&cache->cache_node,
9791                                  &info->block_group_cache_tree);
9792                         RB_CLEAR_NODE(&cache->cache_node);
9793                         spin_unlock(&info->block_group_cache_lock);
9794                         btrfs_put_block_group(cache);
9795                         goto error;
9796                 }
9797
9798                 cache->space_info = space_info;
9799                 spin_lock(&cache->space_info->lock);
9800                 cache->space_info->bytes_readonly += cache->bytes_super;
9801                 spin_unlock(&cache->space_info->lock);
9802
9803                 __link_block_group(space_info, cache);
9804
9805                 set_avail_alloc_bits(root->fs_info, cache->flags);
9806                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9807                         inc_block_group_ro(cache, 1);
9808                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9809                         spin_lock(&info->unused_bgs_lock);
9810                         /* Should always be true but just in case. */
9811                         if (list_empty(&cache->bg_list)) {
9812                                 btrfs_get_block_group(cache);
9813                                 list_add_tail(&cache->bg_list,
9814                                               &info->unused_bgs);
9815                         }
9816                         spin_unlock(&info->unused_bgs_lock);
9817                 }
9818         }
9819
9820         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9821                 if (!(get_alloc_profile(root, space_info->flags) &
9822                       (BTRFS_BLOCK_GROUP_RAID10 |
9823                        BTRFS_BLOCK_GROUP_RAID1 |
9824                        BTRFS_BLOCK_GROUP_RAID5 |
9825                        BTRFS_BLOCK_GROUP_RAID6 |
9826                        BTRFS_BLOCK_GROUP_DUP)))
9827                         continue;
9828                 /*
9829                  * avoid allocating from un-mirrored block group if there are
9830                  * mirrored block groups.
9831                  */
9832                 list_for_each_entry(cache,
9833                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9834                                 list)
9835                         inc_block_group_ro(cache, 1);
9836                 list_for_each_entry(cache,
9837                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9838                                 list)
9839                         inc_block_group_ro(cache, 1);
9840         }
9841
9842         init_global_block_rsv(info);
9843         ret = 0;
9844 error:
9845         btrfs_free_path(path);
9846         return ret;
9847 }
9848
9849 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9850                                        struct btrfs_root *root)
9851 {
9852         struct btrfs_block_group_cache *block_group, *tmp;
9853         struct btrfs_root *extent_root = root->fs_info->extent_root;
9854         struct btrfs_block_group_item item;
9855         struct btrfs_key key;
9856         int ret = 0;
9857         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9858
9859         trans->can_flush_pending_bgs = false;
9860         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9861                 if (ret)
9862                         goto next;
9863
9864                 spin_lock(&block_group->lock);
9865                 memcpy(&item, &block_group->item, sizeof(item));
9866                 memcpy(&key, &block_group->key, sizeof(key));
9867                 spin_unlock(&block_group->lock);
9868
9869                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9870                                         sizeof(item));
9871                 if (ret)
9872                         btrfs_abort_transaction(trans, extent_root, ret);
9873                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9874                                                key.objectid, key.offset);
9875                 if (ret)
9876                         btrfs_abort_transaction(trans, extent_root, ret);
9877 next:
9878                 list_del_init(&block_group->bg_list);
9879         }
9880         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9881 }
9882
9883 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9884                            struct btrfs_root *root, u64 bytes_used,
9885                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9886                            u64 size)
9887 {
9888         int ret;
9889         struct btrfs_root *extent_root;
9890         struct btrfs_block_group_cache *cache;
9891
9892         extent_root = root->fs_info->extent_root;
9893
9894         btrfs_set_log_full_commit(root->fs_info, trans);
9895
9896         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9897         if (!cache)
9898                 return -ENOMEM;
9899
9900         btrfs_set_block_group_used(&cache->item, bytes_used);
9901         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9902         btrfs_set_block_group_flags(&cache->item, type);
9903
9904         cache->flags = type;
9905         cache->last_byte_to_unpin = (u64)-1;
9906         cache->cached = BTRFS_CACHE_FINISHED;
9907         ret = exclude_super_stripes(root, cache);
9908         if (ret) {
9909                 /*
9910                  * We may have excluded something, so call this just in
9911                  * case.
9912                  */
9913                 free_excluded_extents(root, cache);
9914                 btrfs_put_block_group(cache);
9915                 return ret;
9916         }
9917
9918         add_new_free_space(cache, root->fs_info, chunk_offset,
9919                            chunk_offset + size);
9920
9921         free_excluded_extents(root, cache);
9922
9923 #ifdef CONFIG_BTRFS_DEBUG
9924         if (btrfs_should_fragment_free_space(root, cache)) {
9925                 u64 new_bytes_used = size - bytes_used;
9926
9927                 bytes_used += new_bytes_used >> 1;
9928                 fragment_free_space(root, cache);
9929         }
9930 #endif
9931         /*
9932          * Call to ensure the corresponding space_info object is created and
9933          * assigned to our block group, but don't update its counters just yet.
9934          * We want our bg to be added to the rbtree with its ->space_info set.
9935          */
9936         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9937                                 &cache->space_info);
9938         if (ret) {
9939                 btrfs_remove_free_space_cache(cache);
9940                 btrfs_put_block_group(cache);
9941                 return ret;
9942         }
9943
9944         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9945         if (ret) {
9946                 btrfs_remove_free_space_cache(cache);
9947                 btrfs_put_block_group(cache);
9948                 return ret;
9949         }
9950
9951         /*
9952          * Now that our block group has its ->space_info set and is inserted in
9953          * the rbtree, update the space info's counters.
9954          */
9955         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9956                                 &cache->space_info);
9957         if (ret) {
9958                 btrfs_remove_free_space_cache(cache);
9959                 spin_lock(&root->fs_info->block_group_cache_lock);
9960                 rb_erase(&cache->cache_node,
9961                          &root->fs_info->block_group_cache_tree);
9962                 RB_CLEAR_NODE(&cache->cache_node);
9963                 spin_unlock(&root->fs_info->block_group_cache_lock);
9964                 btrfs_put_block_group(cache);
9965                 return ret;
9966         }
9967         update_global_block_rsv(root->fs_info);
9968
9969         spin_lock(&cache->space_info->lock);
9970         cache->space_info->bytes_readonly += cache->bytes_super;
9971         spin_unlock(&cache->space_info->lock);
9972
9973         __link_block_group(cache->space_info, cache);
9974
9975         list_add_tail(&cache->bg_list, &trans->new_bgs);
9976
9977         set_avail_alloc_bits(extent_root->fs_info, type);
9978
9979         return 0;
9980 }
9981
9982 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9983 {
9984         u64 extra_flags = chunk_to_extended(flags) &
9985                                 BTRFS_EXTENDED_PROFILE_MASK;
9986
9987         write_seqlock(&fs_info->profiles_lock);
9988         if (flags & BTRFS_BLOCK_GROUP_DATA)
9989                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9990         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9991                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9992         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9993                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9994         write_sequnlock(&fs_info->profiles_lock);
9995 }
9996
9997 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9998                              struct btrfs_root *root, u64 group_start,
9999                              struct extent_map *em)
10000 {
10001         struct btrfs_path *path;
10002         struct btrfs_block_group_cache *block_group;
10003         struct btrfs_free_cluster *cluster;
10004         struct btrfs_root *tree_root = root->fs_info->tree_root;
10005         struct btrfs_key key;
10006         struct inode *inode;
10007         struct kobject *kobj = NULL;
10008         int ret;
10009         int index;
10010         int factor;
10011         struct btrfs_caching_control *caching_ctl = NULL;
10012         bool remove_em;
10013
10014         root = root->fs_info->extent_root;
10015
10016         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10017         BUG_ON(!block_group);
10018         BUG_ON(!block_group->ro);
10019
10020         /*
10021          * Free the reserved super bytes from this block group before
10022          * remove it.
10023          */
10024         free_excluded_extents(root, block_group);
10025
10026         memcpy(&key, &block_group->key, sizeof(key));
10027         index = get_block_group_index(block_group);
10028         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10029                                   BTRFS_BLOCK_GROUP_RAID1 |
10030                                   BTRFS_BLOCK_GROUP_RAID10))
10031                 factor = 2;
10032         else
10033                 factor = 1;
10034
10035         /* make sure this block group isn't part of an allocation cluster */
10036         cluster = &root->fs_info->data_alloc_cluster;
10037         spin_lock(&cluster->refill_lock);
10038         btrfs_return_cluster_to_free_space(block_group, cluster);
10039         spin_unlock(&cluster->refill_lock);
10040
10041         /*
10042          * make sure this block group isn't part of a metadata
10043          * allocation cluster
10044          */
10045         cluster = &root->fs_info->meta_alloc_cluster;
10046         spin_lock(&cluster->refill_lock);
10047         btrfs_return_cluster_to_free_space(block_group, cluster);
10048         spin_unlock(&cluster->refill_lock);
10049
10050         path = btrfs_alloc_path();
10051         if (!path) {
10052                 ret = -ENOMEM;
10053                 goto out;
10054         }
10055
10056         /*
10057          * get the inode first so any iput calls done for the io_list
10058          * aren't the final iput (no unlinks allowed now)
10059          */
10060         inode = lookup_free_space_inode(tree_root, block_group, path);
10061
10062         mutex_lock(&trans->transaction->cache_write_mutex);
10063         /*
10064          * make sure our free spache cache IO is done before remove the
10065          * free space inode
10066          */
10067         spin_lock(&trans->transaction->dirty_bgs_lock);
10068         if (!list_empty(&block_group->io_list)) {
10069                 list_del_init(&block_group->io_list);
10070
10071                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10072
10073                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10074                 btrfs_wait_cache_io(root, trans, block_group,
10075                                     &block_group->io_ctl, path,
10076                                     block_group->key.objectid);
10077                 btrfs_put_block_group(block_group);
10078                 spin_lock(&trans->transaction->dirty_bgs_lock);
10079         }
10080
10081         if (!list_empty(&block_group->dirty_list)) {
10082                 list_del_init(&block_group->dirty_list);
10083                 btrfs_put_block_group(block_group);
10084         }
10085         spin_unlock(&trans->transaction->dirty_bgs_lock);
10086         mutex_unlock(&trans->transaction->cache_write_mutex);
10087
10088         if (!IS_ERR(inode)) {
10089                 ret = btrfs_orphan_add(trans, inode);
10090                 if (ret) {
10091                         btrfs_add_delayed_iput(inode);
10092                         goto out;
10093                 }
10094                 clear_nlink(inode);
10095                 /* One for the block groups ref */
10096                 spin_lock(&block_group->lock);
10097                 if (block_group->iref) {
10098                         block_group->iref = 0;
10099                         block_group->inode = NULL;
10100                         spin_unlock(&block_group->lock);
10101                         iput(inode);
10102                 } else {
10103                         spin_unlock(&block_group->lock);
10104                 }
10105                 /* One for our lookup ref */
10106                 btrfs_add_delayed_iput(inode);
10107         }
10108
10109         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10110         key.offset = block_group->key.objectid;
10111         key.type = 0;
10112
10113         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10114         if (ret < 0)
10115                 goto out;
10116         if (ret > 0)
10117                 btrfs_release_path(path);
10118         if (ret == 0) {
10119                 ret = btrfs_del_item(trans, tree_root, path);
10120                 if (ret)
10121                         goto out;
10122                 btrfs_release_path(path);
10123         }
10124
10125         spin_lock(&root->fs_info->block_group_cache_lock);
10126         rb_erase(&block_group->cache_node,
10127                  &root->fs_info->block_group_cache_tree);
10128         RB_CLEAR_NODE(&block_group->cache_node);
10129
10130         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10131                 root->fs_info->first_logical_byte = (u64)-1;
10132         spin_unlock(&root->fs_info->block_group_cache_lock);
10133
10134         down_write(&block_group->space_info->groups_sem);
10135         /*
10136          * we must use list_del_init so people can check to see if they
10137          * are still on the list after taking the semaphore
10138          */
10139         list_del_init(&block_group->list);
10140         if (list_empty(&block_group->space_info->block_groups[index])) {
10141                 kobj = block_group->space_info->block_group_kobjs[index];
10142                 block_group->space_info->block_group_kobjs[index] = NULL;
10143                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10144         }
10145         up_write(&block_group->space_info->groups_sem);
10146         if (kobj) {
10147                 kobject_del(kobj);
10148                 kobject_put(kobj);
10149         }
10150
10151         if (block_group->has_caching_ctl)
10152                 caching_ctl = get_caching_control(block_group);
10153         if (block_group->cached == BTRFS_CACHE_STARTED)
10154                 wait_block_group_cache_done(block_group);
10155         if (block_group->has_caching_ctl) {
10156                 down_write(&root->fs_info->commit_root_sem);
10157                 if (!caching_ctl) {
10158                         struct btrfs_caching_control *ctl;
10159
10160                         list_for_each_entry(ctl,
10161                                     &root->fs_info->caching_block_groups, list)
10162                                 if (ctl->block_group == block_group) {
10163                                         caching_ctl = ctl;
10164                                         atomic_inc(&caching_ctl->count);
10165                                         break;
10166                                 }
10167                 }
10168                 if (caching_ctl)
10169                         list_del_init(&caching_ctl->list);
10170                 up_write(&root->fs_info->commit_root_sem);
10171                 if (caching_ctl) {
10172                         /* Once for the caching bgs list and once for us. */
10173                         put_caching_control(caching_ctl);
10174                         put_caching_control(caching_ctl);
10175                 }
10176         }
10177
10178         spin_lock(&trans->transaction->dirty_bgs_lock);
10179         if (!list_empty(&block_group->dirty_list)) {
10180                 WARN_ON(1);
10181         }
10182         if (!list_empty(&block_group->io_list)) {
10183                 WARN_ON(1);
10184         }
10185         spin_unlock(&trans->transaction->dirty_bgs_lock);
10186         btrfs_remove_free_space_cache(block_group);
10187
10188         spin_lock(&block_group->space_info->lock);
10189         list_del_init(&block_group->ro_list);
10190
10191         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10192                 WARN_ON(block_group->space_info->total_bytes
10193                         < block_group->key.offset);
10194                 WARN_ON(block_group->space_info->bytes_readonly
10195                         < block_group->key.offset);
10196                 WARN_ON(block_group->space_info->disk_total
10197                         < block_group->key.offset * factor);
10198         }
10199         block_group->space_info->total_bytes -= block_group->key.offset;
10200         block_group->space_info->bytes_readonly -= block_group->key.offset;
10201         block_group->space_info->disk_total -= block_group->key.offset * factor;
10202
10203         spin_unlock(&block_group->space_info->lock);
10204
10205         memcpy(&key, &block_group->key, sizeof(key));
10206
10207         lock_chunks(root);
10208         if (!list_empty(&em->list)) {
10209                 /* We're in the transaction->pending_chunks list. */
10210                 free_extent_map(em);
10211         }
10212         spin_lock(&block_group->lock);
10213         block_group->removed = 1;
10214         /*
10215          * At this point trimming can't start on this block group, because we
10216          * removed the block group from the tree fs_info->block_group_cache_tree
10217          * so no one can't find it anymore and even if someone already got this
10218          * block group before we removed it from the rbtree, they have already
10219          * incremented block_group->trimming - if they didn't, they won't find
10220          * any free space entries because we already removed them all when we
10221          * called btrfs_remove_free_space_cache().
10222          *
10223          * And we must not remove the extent map from the fs_info->mapping_tree
10224          * to prevent the same logical address range and physical device space
10225          * ranges from being reused for a new block group. This is because our
10226          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10227          * completely transactionless, so while it is trimming a range the
10228          * currently running transaction might finish and a new one start,
10229          * allowing for new block groups to be created that can reuse the same
10230          * physical device locations unless we take this special care.
10231          *
10232          * There may also be an implicit trim operation if the file system
10233          * is mounted with -odiscard. The same protections must remain
10234          * in place until the extents have been discarded completely when
10235          * the transaction commit has completed.
10236          */
10237         remove_em = (atomic_read(&block_group->trimming) == 0);
10238         /*
10239          * Make sure a trimmer task always sees the em in the pinned_chunks list
10240          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10241          * before checking block_group->removed).
10242          */
10243         if (!remove_em) {
10244                 /*
10245                  * Our em might be in trans->transaction->pending_chunks which
10246                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10247                  * and so is the fs_info->pinned_chunks list.
10248                  *
10249                  * So at this point we must be holding the chunk_mutex to avoid
10250                  * any races with chunk allocation (more specifically at
10251                  * volumes.c:contains_pending_extent()), to ensure it always
10252                  * sees the em, either in the pending_chunks list or in the
10253                  * pinned_chunks list.
10254                  */
10255                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10256         }
10257         spin_unlock(&block_group->lock);
10258
10259         if (remove_em) {
10260                 struct extent_map_tree *em_tree;
10261
10262                 em_tree = &root->fs_info->mapping_tree.map_tree;
10263                 write_lock(&em_tree->lock);
10264                 /*
10265                  * The em might be in the pending_chunks list, so make sure the
10266                  * chunk mutex is locked, since remove_extent_mapping() will
10267                  * delete us from that list.
10268                  */
10269                 remove_extent_mapping(em_tree, em);
10270                 write_unlock(&em_tree->lock);
10271                 /* once for the tree */
10272                 free_extent_map(em);
10273         }
10274
10275         unlock_chunks(root);
10276
10277         btrfs_put_block_group(block_group);
10278         btrfs_put_block_group(block_group);
10279
10280         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10281         if (ret > 0)
10282                 ret = -EIO;
10283         if (ret < 0)
10284                 goto out;
10285
10286         ret = btrfs_del_item(trans, root, path);
10287 out:
10288         btrfs_free_path(path);
10289         return ret;
10290 }
10291
10292 struct btrfs_trans_handle *
10293 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10294                                      const u64 chunk_offset)
10295 {
10296         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10297         struct extent_map *em;
10298         struct map_lookup *map;
10299         unsigned int num_items;
10300
10301         read_lock(&em_tree->lock);
10302         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10303         read_unlock(&em_tree->lock);
10304         ASSERT(em && em->start == chunk_offset);
10305
10306         /*
10307          * We need to reserve 3 + N units from the metadata space info in order
10308          * to remove a block group (done at btrfs_remove_chunk() and at
10309          * btrfs_remove_block_group()), which are used for:
10310          *
10311          * 1 unit for adding the free space inode's orphan (located in the tree
10312          * of tree roots).
10313          * 1 unit for deleting the block group item (located in the extent
10314          * tree).
10315          * 1 unit for deleting the free space item (located in tree of tree
10316          * roots).
10317          * N units for deleting N device extent items corresponding to each
10318          * stripe (located in the device tree).
10319          *
10320          * In order to remove a block group we also need to reserve units in the
10321          * system space info in order to update the chunk tree (update one or
10322          * more device items and remove one chunk item), but this is done at
10323          * btrfs_remove_chunk() through a call to check_system_chunk().
10324          */
10325         map = em->map_lookup;
10326         num_items = 3 + map->num_stripes;
10327         free_extent_map(em);
10328
10329         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10330                                                            num_items, 1);
10331 }
10332
10333 /*
10334  * Process the unused_bgs list and remove any that don't have any allocated
10335  * space inside of them.
10336  */
10337 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10338 {
10339         struct btrfs_block_group_cache *block_group;
10340         struct btrfs_space_info *space_info;
10341         struct btrfs_root *root = fs_info->extent_root;
10342         struct btrfs_trans_handle *trans;
10343         int ret = 0;
10344
10345         if (!fs_info->open)
10346                 return;
10347
10348         spin_lock(&fs_info->unused_bgs_lock);
10349         while (!list_empty(&fs_info->unused_bgs)) {
10350                 u64 start, end;
10351                 int trimming;
10352
10353                 block_group = list_first_entry(&fs_info->unused_bgs,
10354                                                struct btrfs_block_group_cache,
10355                                                bg_list);
10356                 list_del_init(&block_group->bg_list);
10357
10358                 space_info = block_group->space_info;
10359
10360                 if (ret || btrfs_mixed_space_info(space_info)) {
10361                         btrfs_put_block_group(block_group);
10362                         continue;
10363                 }
10364                 spin_unlock(&fs_info->unused_bgs_lock);
10365
10366                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10367
10368                 /* Don't want to race with allocators so take the groups_sem */
10369                 down_write(&space_info->groups_sem);
10370                 spin_lock(&block_group->lock);
10371                 if (block_group->reserved ||
10372                     btrfs_block_group_used(&block_group->item) ||
10373                     block_group->ro ||
10374                     list_is_singular(&block_group->list)) {
10375                         /*
10376                          * We want to bail if we made new allocations or have
10377                          * outstanding allocations in this block group.  We do
10378                          * the ro check in case balance is currently acting on
10379                          * this block group.
10380                          */
10381                         spin_unlock(&block_group->lock);
10382                         up_write(&space_info->groups_sem);
10383                         goto next;
10384                 }
10385                 spin_unlock(&block_group->lock);
10386
10387                 /* We don't want to force the issue, only flip if it's ok. */
10388                 ret = inc_block_group_ro(block_group, 0);
10389                 up_write(&space_info->groups_sem);
10390                 if (ret < 0) {
10391                         ret = 0;
10392                         goto next;
10393                 }
10394
10395                 /*
10396                  * Want to do this before we do anything else so we can recover
10397                  * properly if we fail to join the transaction.
10398                  */
10399                 trans = btrfs_start_trans_remove_block_group(fs_info,
10400                                                      block_group->key.objectid);
10401                 if (IS_ERR(trans)) {
10402                         btrfs_dec_block_group_ro(root, block_group);
10403                         ret = PTR_ERR(trans);
10404                         goto next;
10405                 }
10406
10407                 /*
10408                  * We could have pending pinned extents for this block group,
10409                  * just delete them, we don't care about them anymore.
10410                  */
10411                 start = block_group->key.objectid;
10412                 end = start + block_group->key.offset - 1;
10413                 /*
10414                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10415                  * btrfs_finish_extent_commit(). If we are at transaction N,
10416                  * another task might be running finish_extent_commit() for the
10417                  * previous transaction N - 1, and have seen a range belonging
10418                  * to the block group in freed_extents[] before we were able to
10419                  * clear the whole block group range from freed_extents[]. This
10420                  * means that task can lookup for the block group after we
10421                  * unpinned it from freed_extents[] and removed it, leading to
10422                  * a BUG_ON() at btrfs_unpin_extent_range().
10423                  */
10424                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10425                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10426                                   EXTENT_DIRTY, GFP_NOFS);
10427                 if (ret) {
10428                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10429                         btrfs_dec_block_group_ro(root, block_group);
10430                         goto end_trans;
10431                 }
10432                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10433                                   EXTENT_DIRTY, GFP_NOFS);
10434                 if (ret) {
10435                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10436                         btrfs_dec_block_group_ro(root, block_group);
10437                         goto end_trans;
10438                 }
10439                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10440
10441                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10442                 spin_lock(&space_info->lock);
10443                 spin_lock(&block_group->lock);
10444
10445                 space_info->bytes_pinned -= block_group->pinned;
10446                 space_info->bytes_readonly += block_group->pinned;
10447                 percpu_counter_add(&space_info->total_bytes_pinned,
10448                                    -block_group->pinned);
10449                 block_group->pinned = 0;
10450
10451                 spin_unlock(&block_group->lock);
10452                 spin_unlock(&space_info->lock);
10453
10454                 /* DISCARD can flip during remount */
10455                 trimming = btrfs_test_opt(root, DISCARD);
10456
10457                 /* Implicit trim during transaction commit. */
10458                 if (trimming)
10459                         btrfs_get_block_group_trimming(block_group);
10460
10461                 /*
10462                  * Btrfs_remove_chunk will abort the transaction if things go
10463                  * horribly wrong.
10464                  */
10465                 ret = btrfs_remove_chunk(trans, root,
10466                                          block_group->key.objectid);
10467
10468                 if (ret) {
10469                         if (trimming)
10470                                 btrfs_put_block_group_trimming(block_group);
10471                         goto end_trans;
10472                 }
10473
10474                 /*
10475                  * If we're not mounted with -odiscard, we can just forget
10476                  * about this block group. Otherwise we'll need to wait
10477                  * until transaction commit to do the actual discard.
10478                  */
10479                 if (trimming) {
10480                         spin_lock(&fs_info->unused_bgs_lock);
10481                         /*
10482                          * A concurrent scrub might have added us to the list
10483                          * fs_info->unused_bgs, so use a list_move operation
10484                          * to add the block group to the deleted_bgs list.
10485                          */
10486                         list_move(&block_group->bg_list,
10487                                   &trans->transaction->deleted_bgs);
10488                         spin_unlock(&fs_info->unused_bgs_lock);
10489                         btrfs_get_block_group(block_group);
10490                 }
10491 end_trans:
10492                 btrfs_end_transaction(trans, root);
10493 next:
10494                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10495                 btrfs_put_block_group(block_group);
10496                 spin_lock(&fs_info->unused_bgs_lock);
10497         }
10498         spin_unlock(&fs_info->unused_bgs_lock);
10499 }
10500
10501 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10502 {
10503         struct btrfs_space_info *space_info;
10504         struct btrfs_super_block *disk_super;
10505         u64 features;
10506         u64 flags;
10507         int mixed = 0;
10508         int ret;
10509
10510         disk_super = fs_info->super_copy;
10511         if (!btrfs_super_root(disk_super))
10512                 return 1;
10513
10514         features = btrfs_super_incompat_flags(disk_super);
10515         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10516                 mixed = 1;
10517
10518         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10519         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10520         if (ret)
10521                 goto out;
10522
10523         if (mixed) {
10524                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10525                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10526         } else {
10527                 flags = BTRFS_BLOCK_GROUP_METADATA;
10528                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10529                 if (ret)
10530                         goto out;
10531
10532                 flags = BTRFS_BLOCK_GROUP_DATA;
10533                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10534         }
10535 out:
10536         return ret;
10537 }
10538
10539 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10540 {
10541         return unpin_extent_range(root, start, end, false);
10542 }
10543
10544 /*
10545  * It used to be that old block groups would be left around forever.
10546  * Iterating over them would be enough to trim unused space.  Since we
10547  * now automatically remove them, we also need to iterate over unallocated
10548  * space.
10549  *
10550  * We don't want a transaction for this since the discard may take a
10551  * substantial amount of time.  We don't require that a transaction be
10552  * running, but we do need to take a running transaction into account
10553  * to ensure that we're not discarding chunks that were released in
10554  * the current transaction.
10555  *
10556  * Holding the chunks lock will prevent other threads from allocating
10557  * or releasing chunks, but it won't prevent a running transaction
10558  * from committing and releasing the memory that the pending chunks
10559  * list head uses.  For that, we need to take a reference to the
10560  * transaction.
10561  */
10562 static int btrfs_trim_free_extents(struct btrfs_device *device,
10563                                    u64 minlen, u64 *trimmed)
10564 {
10565         u64 start = 0, len = 0;
10566         int ret;
10567
10568         *trimmed = 0;
10569
10570         /* Not writeable = nothing to do. */
10571         if (!device->writeable)
10572                 return 0;
10573
10574         /* No free space = nothing to do. */
10575         if (device->total_bytes <= device->bytes_used)
10576                 return 0;
10577
10578         ret = 0;
10579
10580         while (1) {
10581                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10582                 struct btrfs_transaction *trans;
10583                 u64 bytes;
10584
10585                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10586                 if (ret)
10587                         return ret;
10588
10589                 down_read(&fs_info->commit_root_sem);
10590
10591                 spin_lock(&fs_info->trans_lock);
10592                 trans = fs_info->running_transaction;
10593                 if (trans)
10594                         atomic_inc(&trans->use_count);
10595                 spin_unlock(&fs_info->trans_lock);
10596
10597                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10598                                                  &start, &len);
10599                 if (trans)
10600                         btrfs_put_transaction(trans);
10601
10602                 if (ret) {
10603                         up_read(&fs_info->commit_root_sem);
10604                         mutex_unlock(&fs_info->chunk_mutex);
10605                         if (ret == -ENOSPC)
10606                                 ret = 0;
10607                         break;
10608                 }
10609
10610                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10611                 up_read(&fs_info->commit_root_sem);
10612                 mutex_unlock(&fs_info->chunk_mutex);
10613
10614                 if (ret)
10615                         break;
10616
10617                 start += len;
10618                 *trimmed += bytes;
10619
10620                 if (fatal_signal_pending(current)) {
10621                         ret = -ERESTARTSYS;
10622                         break;
10623                 }
10624
10625                 cond_resched();
10626         }
10627
10628         return ret;
10629 }
10630
10631 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10632 {
10633         struct btrfs_fs_info *fs_info = root->fs_info;
10634         struct btrfs_block_group_cache *cache = NULL;
10635         struct btrfs_device *device;
10636         struct list_head *devices;
10637         u64 group_trimmed;
10638         u64 start;
10639         u64 end;
10640         u64 trimmed = 0;
10641         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10642         int ret = 0;
10643
10644         /*
10645          * try to trim all FS space, our block group may start from non-zero.
10646          */
10647         if (range->len == total_bytes)
10648                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10649         else
10650                 cache = btrfs_lookup_block_group(fs_info, range->start);
10651
10652         while (cache) {
10653                 if (cache->key.objectid >= (range->start + range->len)) {
10654                         btrfs_put_block_group(cache);
10655                         break;
10656                 }
10657
10658                 start = max(range->start, cache->key.objectid);
10659                 end = min(range->start + range->len,
10660                                 cache->key.objectid + cache->key.offset);
10661
10662                 if (end - start >= range->minlen) {
10663                         if (!block_group_cache_done(cache)) {
10664                                 ret = cache_block_group(cache, 0);
10665                                 if (ret) {
10666                                         btrfs_put_block_group(cache);
10667                                         break;
10668                                 }
10669                                 ret = wait_block_group_cache_done(cache);
10670                                 if (ret) {
10671                                         btrfs_put_block_group(cache);
10672                                         break;
10673                                 }
10674                         }
10675                         ret = btrfs_trim_block_group(cache,
10676                                                      &group_trimmed,
10677                                                      start,
10678                                                      end,
10679                                                      range->minlen);
10680
10681                         trimmed += group_trimmed;
10682                         if (ret) {
10683                                 btrfs_put_block_group(cache);
10684                                 break;
10685                         }
10686                 }
10687
10688                 cache = next_block_group(fs_info->tree_root, cache);
10689         }
10690
10691         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10692         devices = &root->fs_info->fs_devices->alloc_list;
10693         list_for_each_entry(device, devices, dev_alloc_list) {
10694                 ret = btrfs_trim_free_extents(device, range->minlen,
10695                                               &group_trimmed);
10696                 if (ret)
10697                         break;
10698
10699                 trimmed += group_trimmed;
10700         }
10701         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10702
10703         range->len = trimmed;
10704         return ret;
10705 }
10706
10707 /*
10708  * btrfs_{start,end}_write_no_snapshoting() are similar to
10709  * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
10710  * data into the page cache through nocow before the subvolume is snapshoted,
10711  * but flush the data into disk after the snapshot creation, or to prevent
10712  * operations while snapshoting is ongoing and that cause the snapshot to be
10713  * inconsistent (writes followed by expanding truncates for example).
10714  */
10715 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10716 {
10717         percpu_counter_dec(&root->subv_writers->counter);
10718         /*
10719          * Make sure counter is updated before we wake up waiters.
10720          */
10721         smp_mb();
10722         if (waitqueue_active(&root->subv_writers->wait))
10723                 wake_up(&root->subv_writers->wait);
10724 }
10725
10726 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10727 {
10728         if (atomic_read(&root->will_be_snapshoted))
10729                 return 0;
10730
10731         percpu_counter_inc(&root->subv_writers->counter);
10732         /*
10733          * Make sure counter is updated before we check for snapshot creation.
10734          */
10735         smp_mb();
10736         if (atomic_read(&root->will_be_snapshoted)) {
10737                 btrfs_end_write_no_snapshoting(root);
10738                 return 0;
10739         }
10740         return 1;
10741 }