Btrfs: keep track of max_extent_size per space_info
fs/btrfs/extent-tree.c (cascardo/linux.git)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

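/*
 * Usage sketch (a hypothetical caller; the real callers appear further
 * down in this file): a forced allocation simply passes
 * CHUNK_ALLOC_FORCE, e.g.
 *
 *      ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 */
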
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

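/*
 * A sketch of the intended pairing (see btrfs_update_reserved_bytes()
 * declared below): an allocation reserves space with RESERVE_ALLOC (or
 * RESERVE_ALLOC_NO_ACCOUNT when the ENOSPC accounting already happened
 * elsewhere), and the matching release passes RESERVE_FREE:
 *
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *      ...
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, delalloc);
 */
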
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

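/*
 * Block group cache structs are reference counted: every lookup that
 * hands one out (including block_group_cache_tree_search() below) takes
 * a reference, and each must be balanced by a btrfs_put_block_group(),
 * which frees the struct once the last reference is dropped.
 */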
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

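/*
 * The superblock and its mirrors live at fixed offsets inside ordinary
 * block groups, so their stripes have to be carved out of the free space
 * accounting before the group is cached; bytes_super tracks how much of
 * the group they consume.
 */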
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space won't be released until the
 * transaction commits.
 */
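/*
 * Example (a sketch with made-up numbers): caching the range [0, 100)
 * while [40, 60) is still pinned adds [0, 40) and [60, 100) as free
 * space and returns 80; the pinned middle is skipped until the
 * transaction commits.
 */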
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;
        bool wakeup = true;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(extent_root, block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                if (wakeup)
                                        caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        if (wakeup)
                                caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                if (wakeup)
                                        wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(extent_root, block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->key.offset -
                        btrfs_block_group_used(&block_group->item);
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(extent_root, block_group);
        }
#endif

        caching_ctl->progress = (u64)-1;
err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

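/*
 * Kick off (or finish) caching a block group's free space.  A sketch of
 * the cache->cached state machine as used below: BTRFS_CACHE_NO means
 * nothing has happened yet; BTRFS_CACHE_FAST marks an attempt to load
 * the on-disk free space cache; on a miss we either fall back to
 * BTRFS_CACHE_STARTED (caching_thread() above walks the extent tree) or
 * return to BTRFS_CACHE_NO when only a fast load was requested; the
 * walk ends in BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR.
 */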
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching block
         * groups we need to first check if we're doing a fast load here, so
         * we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
                if (ret == 1 &&
                    btrfs_should_fragment_free_space(fs_info->extent_root,
                                                     cache)) {
                        u64 bytes_used;

                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
                        bytes_used = cache->key.offset -
                                btrfs_block_group_used(&cache->item);
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
                        fragment_free_space(fs_info->extent_root, cache);
                }
#endif
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * delayed refs have been processed.
 */
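/*
 * For example (a sketch): an extent with two references on disk and a
 * pending delayed ref head whose ref_mod is -1 reports *refs == 1 here,
 * i.e. the value a transaction commit would write back.
 */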
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

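/*
 * A worked example of the key composition above (values are made up):
 * a file extent at bytenr B, referenced by inode 257 at file offset 0
 * in subvolume 5, carries the implicit back ref key
 *
 *     (B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * whereas the same extent referenced through a shared parent block P
 * uses (B, BTRFS_SHARED_DATA_REF_KEY, P).  lookup_extent_data_ref()
 * below builds exactly these keys.
 */
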
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

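/*
 * Insert (or bump the count of) a data ref item.  Note the collision
 * handling below: if the hashed key already exists but describes a
 * different (root, owner, offset) triple, the key offset is incremented
 * and the insert retried, so colliding refs end up at neighbouring
 * offsets.
 */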
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

1325 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1326                                            struct btrfs_root *root,
1327                                            struct btrfs_path *path,
1328                                            int refs_to_drop, int *last_ref)
1329 {
1330         struct btrfs_key key;
1331         struct btrfs_extent_data_ref *ref1 = NULL;
1332         struct btrfs_shared_data_ref *ref2 = NULL;
1333         struct extent_buffer *leaf;
1334         u32 num_refs = 0;
1335         int ret = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339
1340         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 BUG();
1357         }
1358
1359         BUG_ON(num_refs < refs_to_drop);
1360         num_refs -= refs_to_drop;
1361
1362         if (num_refs == 0) {
1363                 ret = btrfs_del_item(trans, root, path);
1364                 *last_ref = 1;
1365         } else {
1366                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1367                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1368                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1369                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1370 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1371                 else {
1372                         struct btrfs_extent_ref_v0 *ref0;
1373                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1374                                         struct btrfs_extent_ref_v0);
1375                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1376                 }
1377 #endif
1378                 btrfs_mark_buffer_dirty(leaf);
1379         }
1380         return ret;
1381 }
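/*
 * The pattern above -- decrement a persisted refcount, rewrite the item
 * while it stays positive, delete it and set *last_ref at zero -- in a
 * minimal standalone sketch (hypothetical helper, illustrative only):
 *
 *	static int drop_refs(u32 *num_refs, u32 refs_to_drop, int *last_ref)
 *	{
 *		if (*num_refs < refs_to_drop)
 *			return -EINVAL;		// the kernel code BUG()s here
 *		*num_refs -= refs_to_drop;
 *		if (*num_refs == 0)
 *			*last_ref = 1;		// caller deletes the item
 *		return 0;
 *	}
 */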
1382
1383 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1384                                           struct btrfs_extent_inline_ref *iref)
1385 {
1386         struct btrfs_key key;
1387         struct extent_buffer *leaf;
1388         struct btrfs_extent_data_ref *ref1;
1389         struct btrfs_shared_data_ref *ref2;
1390         u32 num_refs = 0;
1391
1392         leaf = path->nodes[0];
1393         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1394         if (iref) {
1395                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1396                     BTRFS_EXTENT_DATA_REF_KEY) {
1397                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1398                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1399                 } else {
1400                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1401                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1402                 }
1403         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1404                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1405                                       struct btrfs_extent_data_ref);
1406                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1407         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1408                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1409                                       struct btrfs_shared_data_ref);
1410                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1411 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1412         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1413                 struct btrfs_extent_ref_v0 *ref0;
1414                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1415                                       struct btrfs_extent_ref_v0);
1416                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1417 #endif
1418         } else {
1419                 WARN_ON(1);
1420         }
1421         return num_refs;
1422 }
1423
1424 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1425                                           struct btrfs_root *root,
1426                                           struct btrfs_path *path,
1427                                           u64 bytenr, u64 parent,
1428                                           u64 root_objectid)
1429 {
1430         struct btrfs_key key;
1431         int ret;
1432
1433         key.objectid = bytenr;
1434         if (parent) {
1435                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1436                 key.offset = parent;
1437         } else {
1438                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1439                 key.offset = root_objectid;
1440         }
1441
1442         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1443         if (ret > 0)
1444                 ret = -ENOENT;
1445 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1446         if (ret == -ENOENT && parent) {
1447                 btrfs_release_path(path);
1448                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1449                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1450                 if (ret > 0)
1451                         ret = -ENOENT;
1452         }
1453 #endif
1454         return ret;
1455 }
1456
1457 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1458                                           struct btrfs_root *root,
1459                                           struct btrfs_path *path,
1460                                           u64 bytenr, u64 parent,
1461                                           u64 root_objectid)
1462 {
1463         struct btrfs_key key;
1464         int ret;
1465
1466         key.objectid = bytenr;
1467         if (parent) {
1468                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1469                 key.offset = parent;
1470         } else {
1471                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1472                 key.offset = root_objectid;
1473         }
1474
1475         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1476         btrfs_release_path(path);
1477         return ret;
1478 }
1479
1480 static inline int extent_ref_type(u64 parent, u64 owner)
1481 {
1482         int type;
1483         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1484                 if (parent > 0)
1485                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1486                 else
1487                         type = BTRFS_TREE_BLOCK_REF_KEY;
1488         } else {
1489                 if (parent > 0)
1490                         type = BTRFS_SHARED_DATA_REF_KEY;
1491                 else
1492                         type = BTRFS_EXTENT_DATA_REF_KEY;
1493         }
1494         return type;
1495 }
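/*
 * The helper above is a 2x2 decision table.  Worked through once: a tree
 * block (owner is a level, below BTRFS_FIRST_FREE_OBJECTID) referenced
 * via a known parent block gets BTRFS_SHARED_BLOCK_REF_KEY, while the
 * same block referenced directly by a root gets BTRFS_TREE_BLOCK_REF_KEY.
 * Data extents (owner is an inode number) map the same way onto
 * BTRFS_SHARED_DATA_REF_KEY and BTRFS_EXTENT_DATA_REF_KEY.
 */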
1496
1497 static int find_next_key(struct btrfs_path *path, int level,
1498                          struct btrfs_key *key)
1500 {
1501         for (; level < BTRFS_MAX_LEVEL; level++) {
1502                 if (!path->nodes[level])
1503                         break;
1504                 if (path->slots[level] + 1 >=
1505                     btrfs_header_nritems(path->nodes[level]))
1506                         continue;
1507                 if (level == 0)
1508                         btrfs_item_key_to_cpu(path->nodes[level], key,
1509                                               path->slots[level] + 1);
1510                 else
1511                         btrfs_node_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 return 0;
1514         }
1515         return 1;
1516 }
1517
1518 /*
1519  * look for inline back ref. if back ref is found, *ref_ret is set
1520  * to the address of inline back ref, and 0 is returned.
1521  *
1522  * if back ref isn't found, *ref_ret is set to the address where it
1523  * should be inserted, and -ENOENT is returned.
1524  *
1525  * if insert is true and there are too many inline back refs, the path
1526  * points to the extent item, and -EAGAIN is returned.
1527  *
1528  * NOTE: inline back refs are ordered in the same way that back ref
1529  *       items in the tree are ordered.
1530  */
1531 static noinline_for_stack
1532 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1533                                  struct btrfs_root *root,
1534                                  struct btrfs_path *path,
1535                                  struct btrfs_extent_inline_ref **ref_ret,
1536                                  u64 bytenr, u64 num_bytes,
1537                                  u64 parent, u64 root_objectid,
1538                                  u64 owner, u64 offset, int insert)
1539 {
1540         struct btrfs_key key;
1541         struct extent_buffer *leaf;
1542         struct btrfs_extent_item *ei;
1543         struct btrfs_extent_inline_ref *iref;
1544         u64 flags;
1545         u64 item_size;
1546         unsigned long ptr;
1547         unsigned long end;
1548         int extra_size;
1549         int type;
1550         int want;
1551         int ret;
1552         int err = 0;
1553         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1554                                                  SKINNY_METADATA);
1555
1556         key.objectid = bytenr;
1557         key.type = BTRFS_EXTENT_ITEM_KEY;
1558         key.offset = num_bytes;
1559
1560         want = extent_ref_type(parent, owner);
1561         if (insert) {
1562                 extra_size = btrfs_extent_inline_ref_size(want);
1563                 path->keep_locks = 1;
1564         } else
1565                 extra_size = -1;
1566
1567         /*
1568          * Owner is our parent level, so we can just add one to get the level
1569          * for the block we are interested in.
1570          */
1571         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1572                 key.type = BTRFS_METADATA_ITEM_KEY;
1573                 key.offset = owner;
1574         }
1575
1576 again:
1577         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1578         if (ret < 0) {
1579                 err = ret;
1580                 goto out;
1581         }
1582
1583         /*
1584          * We may be a newly converted file system which still has the old fat
1585          * extent entries for metadata, so try and see if we have one of those.
1586          */
1587         if (ret > 0 && skinny_metadata) {
1588                 skinny_metadata = false;
1589                 if (path->slots[0]) {
1590                         path->slots[0]--;
1591                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1592                                               path->slots[0]);
1593                         if (key.objectid == bytenr &&
1594                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1595                             key.offset == num_bytes)
1596                                 ret = 0;
1597                 }
1598                 if (ret) {
1599                         key.objectid = bytenr;
1600                         key.type = BTRFS_EXTENT_ITEM_KEY;
1601                         key.offset = num_bytes;
1602                         btrfs_release_path(path);
1603                         goto again;
1604                 }
1605         }
1606
1607         if (ret && !insert) {
1608                 err = -ENOENT;
1609                 goto out;
1610         } else if (WARN_ON(ret)) {
1611                 err = -EIO;
1612                 goto out;
1613         }
1614
1615         leaf = path->nodes[0];
1616         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1617 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1618         if (item_size < sizeof(*ei)) {
1619                 if (!insert) {
1620                         err = -ENOENT;
1621                         goto out;
1622                 }
1623                 ret = convert_extent_item_v0(trans, root, path, owner,
1624                                              extra_size);
1625                 if (ret < 0) {
1626                         err = ret;
1627                         goto out;
1628                 }
1629                 leaf = path->nodes[0];
1630                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1631         }
1632 #endif
1633         BUG_ON(item_size < sizeof(*ei));
1634
1635         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1636         flags = btrfs_extent_flags(leaf, ei);
1637
1638         ptr = (unsigned long)(ei + 1);
1639         end = (unsigned long)ei + item_size;
1640
1641         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1642                 ptr += sizeof(struct btrfs_tree_block_info);
1643                 BUG_ON(ptr > end);
1644         }
1645
1646         err = -ENOENT;
1647         while (1) {
1648                 if (ptr >= end) {
1649                         WARN_ON(ptr > end);
1650                         break;
1651                 }
1652                 iref = (struct btrfs_extent_inline_ref *)ptr;
1653                 type = btrfs_extent_inline_ref_type(leaf, iref);
1654                 if (want < type)
1655                         break;
1656                 if (want > type) {
1657                         ptr += btrfs_extent_inline_ref_size(type);
1658                         continue;
1659                 }
1660
1661                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1662                         struct btrfs_extent_data_ref *dref;
1663                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1664                         if (match_extent_data_ref(leaf, dref, root_objectid,
1665                                                   owner, offset)) {
1666                                 err = 0;
1667                                 break;
1668                         }
1669                         if (hash_extent_data_ref_item(leaf, dref) <
1670                             hash_extent_data_ref(root_objectid, owner, offset))
1671                                 break;
1672                 } else {
1673                         u64 ref_offset;
1674                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1675                         if (parent > 0) {
1676                                 if (parent == ref_offset) {
1677                                         err = 0;
1678                                         break;
1679                                 }
1680                                 if (ref_offset < parent)
1681                                         break;
1682                         } else {
1683                                 if (root_objectid == ref_offset) {
1684                                         err = 0;
1685                                         break;
1686                                 }
1687                                 if (ref_offset < root_objectid)
1688                                         break;
1689                         }
1690                 }
1691                 ptr += btrfs_extent_inline_ref_size(type);
1692         }
1693         if (err == -ENOENT && insert) {
1694                 if (item_size + extra_size >=
1695                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1696                         err = -EAGAIN;
1697                         goto out;
1698                 }
1699                 /*
1700                  * To add a new inline back ref, we have to make sure
1701                  * there is no corresponding back ref item.
1702                  * For simplicity, we just do not add a new inline back
1703                  * ref if there is any kind of item for this block.
1704                  */
1705                 if (find_next_key(path, 0, &key) == 0 &&
1706                     key.objectid == bytenr &&
1707                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1708                         err = -EAGAIN;
1709                         goto out;
1710                 }
1711         }
1712         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1713 out:
1714         if (insert) {
1715                 path->keep_locks = 0;
1716                 btrfs_unlock_up_safe(path, 1);
1717         }
1718         return err;
1719 }
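/*
 * Caller-side sketch of the three-way contract documented above (error
 * handling trimmed; insert_inline_extent_backref() below is the real
 * in-tree user):
 *
 *	ret = lookup_inline_extent_backref(..., &iref, ..., insert);
 *	if (ret == 0)
 *		// iref points at the existing inline ref; update it
 *	else if (ret == -ENOENT && insert)
 *		// iref is the insertion address; extend the item
 *	else if (ret == -EAGAIN)
 *		// no inline room; fall back to a separate keyed ref item
 */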
1720
1721 /*
1722  * helper to add new inline back ref
1723  */
1724 static noinline_for_stack
1725 void setup_inline_extent_backref(struct btrfs_root *root,
1726                                  struct btrfs_path *path,
1727                                  struct btrfs_extent_inline_ref *iref,
1728                                  u64 parent, u64 root_objectid,
1729                                  u64 owner, u64 offset, int refs_to_add,
1730                                  struct btrfs_delayed_extent_op *extent_op)
1731 {
1732         struct extent_buffer *leaf;
1733         struct btrfs_extent_item *ei;
1734         unsigned long ptr;
1735         unsigned long end;
1736         unsigned long item_offset;
1737         u64 refs;
1738         int size;
1739         int type;
1740
1741         leaf = path->nodes[0];
1742         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1743         item_offset = (unsigned long)iref - (unsigned long)ei;
1744
1745         type = extent_ref_type(parent, owner);
1746         size = btrfs_extent_inline_ref_size(type);
1747
1748         btrfs_extend_item(root, path, size);
1749
1750         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1751         refs = btrfs_extent_refs(leaf, ei);
1752         refs += refs_to_add;
1753         btrfs_set_extent_refs(leaf, ei, refs);
1754         if (extent_op)
1755                 __run_delayed_extent_op(extent_op, leaf, ei);
1756
1757         ptr = (unsigned long)ei + item_offset;
1758         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1759         if (ptr < end - size)
1760                 memmove_extent_buffer(leaf, ptr + size, ptr,
1761                                       end - size - ptr);
1762
1763         iref = (struct btrfs_extent_inline_ref *)ptr;
1764         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1765         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1766                 struct btrfs_extent_data_ref *dref;
1767                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1768                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1769                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1770                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1771                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1772         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1773                 struct btrfs_shared_data_ref *sref;
1774                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1775                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1776                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1777         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1778                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1779         } else {
1780                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1781         }
1782         btrfs_mark_buffer_dirty(leaf);
1783 }
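/*
 * The memmove above opens a gap of 'size' bytes at 'ptr' inside the
 * already-extended extent item.  The same move in a standalone sketch
 * (illustrative buffer and offsets):
 *
 *	char item[64];				// item bytes after extension
 *	unsigned long ptr = 24, end = 64, size = 8;
 *	// shift the tail up by 'size', vacating [ptr, ptr + size)
 *	memmove(item + ptr + size, item + ptr, end - size - ptr);
 *	// the new inline ref is then written into the gap at item + ptr
 */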
1784
1785 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1786                                  struct btrfs_root *root,
1787                                  struct btrfs_path *path,
1788                                  struct btrfs_extent_inline_ref **ref_ret,
1789                                  u64 bytenr, u64 num_bytes, u64 parent,
1790                                  u64 root_objectid, u64 owner, u64 offset)
1791 {
1792         int ret;
1793
1794         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1795                                            bytenr, num_bytes, parent,
1796                                            root_objectid, owner, offset, 0);
1797         if (ret != -ENOENT)
1798                 return ret;
1799
1800         btrfs_release_path(path);
1801         *ref_ret = NULL;
1802
1803         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1804                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1805                                             root_objectid);
1806         } else {
1807                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1808                                              root_objectid, owner, offset);
1809         }
1810         return ret;
1811 }
1812
1813 /*
1814  * helper to update/remove inline back ref
1815  */
1816 static noinline_for_stack
1817 void update_inline_extent_backref(struct btrfs_root *root,
1818                                   struct btrfs_path *path,
1819                                   struct btrfs_extent_inline_ref *iref,
1820                                   int refs_to_mod,
1821                                   struct btrfs_delayed_extent_op *extent_op,
1822                                   int *last_ref)
1823 {
1824         struct extent_buffer *leaf;
1825         struct btrfs_extent_item *ei;
1826         struct btrfs_extent_data_ref *dref = NULL;
1827         struct btrfs_shared_data_ref *sref = NULL;
1828         unsigned long ptr;
1829         unsigned long end;
1830         u32 item_size;
1831         int size;
1832         int type;
1833         u64 refs;
1834
1835         leaf = path->nodes[0];
1836         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1837         refs = btrfs_extent_refs(leaf, ei);
1838         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1839         refs += refs_to_mod;
1840         btrfs_set_extent_refs(leaf, ei, refs);
1841         if (extent_op)
1842                 __run_delayed_extent_op(extent_op, leaf, ei);
1843
1844         type = btrfs_extent_inline_ref_type(leaf, iref);
1845
1846         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1847                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1848                 refs = btrfs_extent_data_ref_count(leaf, dref);
1849         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1850                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1851                 refs = btrfs_shared_data_ref_count(leaf, sref);
1852         } else {
1853                 refs = 1;
1854                 BUG_ON(refs_to_mod != -1);
1855         }
1856
1857         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1858         refs += refs_to_mod;
1859
1860         if (refs > 0) {
1861                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1862                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1863                 else
1864                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1865         } else {
1866                 *last_ref = 1;
1867                 size = btrfs_extent_inline_ref_size(type);
1868                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1869                 ptr = (unsigned long)iref;
1870                 end = (unsigned long)ei + item_size;
1871                 if (ptr + size < end)
1872                         memmove_extent_buffer(leaf, ptr, ptr + size,
1873                                               end - ptr - size);
1874                 item_size -= size;
1875                 btrfs_truncate_item(root, path, item_size, 1);
1876         }
1877         btrfs_mark_buffer_dirty(leaf);
1878 }
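/*
 * Removal is the mirror image of the insertion in
 * setup_inline_extent_backref(): close the gap, then shrink the item.
 * Standalone sketch (illustrative buffer and offsets):
 *
 *	char item[64];
 *	unsigned long ptr = 24, size = 8, end = 64;
 *	if (ptr + size < end)			// refs follow this one?
 *		memmove(item + ptr, item + ptr + size, end - ptr - size);
 *	// the item is then truncated by 'size' (btrfs_truncate_item)
 */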
1879
1880 static noinline_for_stack
1881 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1882                                  struct btrfs_root *root,
1883                                  struct btrfs_path *path,
1884                                  u64 bytenr, u64 num_bytes, u64 parent,
1885                                  u64 root_objectid, u64 owner,
1886                                  u64 offset, int refs_to_add,
1887                                  struct btrfs_delayed_extent_op *extent_op)
1888 {
1889         struct btrfs_extent_inline_ref *iref;
1890         int ret;
1891
1892         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1893                                            bytenr, num_bytes, parent,
1894                                            root_objectid, owner, offset, 1);
1895         if (ret == 0) {
1896                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1897                 update_inline_extent_backref(root, path, iref,
1898                                              refs_to_add, extent_op, NULL);
1899         } else if (ret == -ENOENT) {
1900                 setup_inline_extent_backref(root, path, iref, parent,
1901                                             root_objectid, owner, offset,
1902                                             refs_to_add, extent_op);
1903                 ret = 0;
1904         }
1905         return ret;
1906 }
1907
1908 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1909                                  struct btrfs_root *root,
1910                                  struct btrfs_path *path,
1911                                  u64 bytenr, u64 parent, u64 root_objectid,
1912                                  u64 owner, u64 offset, int refs_to_add)
1913 {
1914         int ret;
1915         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1916                 BUG_ON(refs_to_add != 1);
1917                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1918                                             parent, root_objectid);
1919         } else {
1920                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1921                                              parent, root_objectid,
1922                                              owner, offset, refs_to_add);
1923         }
1924         return ret;
1925 }
1926
1927 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1928                                  struct btrfs_root *root,
1929                                  struct btrfs_path *path,
1930                                  struct btrfs_extent_inline_ref *iref,
1931                                  int refs_to_drop, int is_data, int *last_ref)
1932 {
1933         int ret = 0;
1934
1935         BUG_ON(!is_data && refs_to_drop != 1);
1936         if (iref) {
1937                 update_inline_extent_backref(root, path, iref,
1938                                              -refs_to_drop, NULL, last_ref);
1939         } else if (is_data) {
1940                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1941                                              last_ref);
1942         } else {
1943                 *last_ref = 1;
1944                 ret = btrfs_del_item(trans, root, path);
1945         }
1946         return ret;
1947 }
1948
1949 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
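/*
 * in_range() tests a half-open interval: in_range(b, first, len) is true
 * for first <= b < first + len.  E.g. in_range(4096, 4096, 512) is true
 * but in_range(4608, 4096, 512) is false.  The three checks below taken
 * together detect any overlap between a superblock copy and the range
 * being discarded.
 */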
1950 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1951                                u64 *discarded_bytes)
1952 {
1953         int j, ret = 0;
1954         u64 bytes_left, end;
1955         u64 aligned_start = ALIGN(start, 1 << 9);
1956
1957         if (WARN_ON(start != aligned_start)) {
1958                 len -= aligned_start - start;
1959                 len = round_down(len, 1 << 9);
1960                 start = aligned_start;
1961         }
1962
1963         *discarded_bytes = 0;
1964
1965         if (!len)
1966                 return 0;
1967
1968         end = start + len;
1969         bytes_left = len;
1970
1971         /* Skip any superblocks on this device. */
1972         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1973                 u64 sb_start = btrfs_sb_offset(j);
1974                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1975                 u64 size = sb_start - start;
1976
1977                 if (!in_range(sb_start, start, bytes_left) &&
1978                     !in_range(sb_end, start, bytes_left) &&
1979                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1980                         continue;
1981
1982                 /*
1983                  * Superblock spans beginning of range.  Adjust start and
1984                  * try again.
1985                  */
1986                 if (sb_start <= start) {
1987                         start += sb_end - start;
1988                         if (start > end) {
1989                                 bytes_left = 0;
1990                                 break;
1991                         }
1992                         bytes_left = end - start;
1993                         continue;
1994                 }
1995
1996                 if (size) {
1997                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1998                                                    GFP_NOFS, 0);
1999                         if (!ret)
2000                                 *discarded_bytes += size;
2001                         else if (ret != -EOPNOTSUPP)
2002                                 return ret;
2003                 }
2004
2005                 start = sb_end;
2006                 if (start > end) {
2007                         bytes_left = 0;
2008                         break;
2009                 }
2010                 bytes_left = end - start;
2011         }
2012
2013         if (bytes_left) {
2014                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2015                                            GFP_NOFS, 0);
2016                 if (!ret)
2017                         *discarded_bytes += bytes_left;
2018         }
2019         return ret;
2020 }
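/*
 * Worked example (illustrative offsets): discarding [0, 1M) on a device
 * whose first superblock copy sits at btrfs_sb_offset(0) = 64K.  The loop
 * issues one discard for [0, 64K), skips the BTRFS_SUPER_INFO_SIZE bytes
 * of the superblock, and the final blkdev_issue_discard() flushes the
 * remaining tail starting at the superblock's end.  *discarded_bytes thus
 * counts everything except the preserved superblock ranges.
 */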
2021
2022 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2023                          u64 num_bytes, u64 *actual_bytes)
2024 {
2025         int ret;
2026         u64 discarded_bytes = 0;
2027         struct btrfs_bio *bbio = NULL;
2028
2030         /* Tell the block device(s) that the sectors can be discarded */
2031         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2032                               bytenr, &num_bytes, &bbio, 0);
2033         /* Error condition is -ENOMEM */
2034         if (!ret) {
2035                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2036                 int i;
2037
2039                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2040                         u64 bytes;
2041                         if (!stripe->dev->can_discard)
2042                                 continue;
2043
2044                         ret = btrfs_issue_discard(stripe->dev->bdev,
2045                                                   stripe->physical,
2046                                                   stripe->length,
2047                                                   &bytes);
2048                         if (!ret)
2049                                 discarded_bytes += bytes;
2050                         else if (ret != -EOPNOTSUPP)
2051                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2052
2053                         /*
2054                          * Just in case we get back EOPNOTSUPP for some reason,
2055                          * ignore the return value so we don't screw up
2056                          * people calling discard_extent.
2057                          */
2058                         ret = 0;
2059                 }
2060                 btrfs_put_bbio(bbio);
2061         }
2062
2063         if (actual_bytes)
2064                 *actual_bytes = discarded_bytes;
2065
2067         if (ret == -EOPNOTSUPP)
2068                 ret = 0;
2069         return ret;
2070 }
2071
2072 /* Can return -ENOMEM */
2073 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2074                          struct btrfs_root *root,
2075                          u64 bytenr, u64 num_bytes, u64 parent,
2076                          u64 root_objectid, u64 owner, u64 offset,
2077                          int no_quota)
2078 {
2079         int ret;
2080         struct btrfs_fs_info *fs_info = root->fs_info;
2081
2082         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2083                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2084
2085         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2086                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2087                                         num_bytes,
2088                                         parent, root_objectid, (int)owner,
2089                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2090         } else {
2091                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2092                                         num_bytes,
2093                                         parent, root_objectid, owner, offset,
2094                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2095         }
2096         return ret;
2097 }
2098
2099 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2100                                   struct btrfs_root *root,
2101                                   struct btrfs_delayed_ref_node *node,
2102                                   u64 parent, u64 root_objectid,
2103                                   u64 owner, u64 offset, int refs_to_add,
2104                                   struct btrfs_delayed_extent_op *extent_op)
2105 {
2106         struct btrfs_fs_info *fs_info = root->fs_info;
2107         struct btrfs_path *path;
2108         struct extent_buffer *leaf;
2109         struct btrfs_extent_item *item;
2110         struct btrfs_key key;
2111         u64 bytenr = node->bytenr;
2112         u64 num_bytes = node->num_bytes;
2113         u64 refs;
2114         int ret;
2115         int no_quota = node->no_quota;
2116
2117         path = btrfs_alloc_path();
2118         if (!path)
2119                 return -ENOMEM;
2120
2121         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2122                 no_quota = 1;
2123
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         /* this will set up the path even if it fails to insert the back ref */
2127         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2128                                            bytenr, num_bytes, parent,
2129                                            root_objectid, owner, offset,
2130                                            refs_to_add, extent_op);
2131         if ((ret < 0 && ret != -EAGAIN) || !ret)
2132                 goto out;
2133
2134         /*
2135          * Ok we had -EAGAIN which means we didn't have space to insert an
2136          * inline extent ref, so just update the reference count and add a
2137          * normal backref.
2138          */
2139         leaf = path->nodes[0];
2140         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2141         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2142         refs = btrfs_extent_refs(leaf, item);
2143         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2144         if (extent_op)
2145                 __run_delayed_extent_op(extent_op, leaf, item);
2146
2147         btrfs_mark_buffer_dirty(leaf);
2148         btrfs_release_path(path);
2149
2150         path->reada = 1;
2151         path->leave_spinning = 1;
2152         /* now insert the actual backref */
2153         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2154                                     path, bytenr, parent, root_objectid,
2155                                     owner, offset, refs_to_add);
2156         if (ret)
2157                 btrfs_abort_transaction(trans, root, ret);
2158 out:
2159         btrfs_free_path(path);
2160         return ret;
2161 }
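/*
 * Flow summary: insert_inline_extent_backref() either finishes the whole
 * job (ret == 0), fails hard (ret < 0 and not -EAGAIN), or leaves the
 * path positioned at the extent item (-EAGAIN).  Only in the -EAGAIN
 * case does the code above bump the item's ref count by hand and insert
 * a separate keyed backref item next to it.
 */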
2162
2163 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2164                                 struct btrfs_root *root,
2165                                 struct btrfs_delayed_ref_node *node,
2166                                 struct btrfs_delayed_extent_op *extent_op,
2167                                 int insert_reserved)
2168 {
2169         int ret = 0;
2170         struct btrfs_delayed_data_ref *ref;
2171         struct btrfs_key ins;
2172         u64 parent = 0;
2173         u64 ref_root = 0;
2174         u64 flags = 0;
2175
2176         ins.objectid = node->bytenr;
2177         ins.offset = node->num_bytes;
2178         ins.type = BTRFS_EXTENT_ITEM_KEY;
2179
2180         ref = btrfs_delayed_node_to_data_ref(node);
2181         trace_run_delayed_data_ref(node, ref, node->action);
2182
2183         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2184                 parent = ref->parent;
2185         ref_root = ref->root;
2186
2187         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2188                 if (extent_op)
2189                         flags |= extent_op->flags_to_set;
2190                 ret = alloc_reserved_file_extent(trans, root,
2191                                                  parent, ref_root, flags,
2192                                                  ref->objectid, ref->offset,
2193                                                  &ins, node->ref_mod);
2194         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2195                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2196                                              ref_root, ref->objectid,
2197                                              ref->offset, node->ref_mod,
2198                                              extent_op);
2199         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2200                 ret = __btrfs_free_extent(trans, root, node, parent,
2201                                           ref_root, ref->objectid,
2202                                           ref->offset, node->ref_mod,
2203                                           extent_op);
2204         } else {
2205                 BUG();
2206         }
2207         return ret;
2208 }
2209
2210 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2211                                     struct extent_buffer *leaf,
2212                                     struct btrfs_extent_item *ei)
2213 {
2214         u64 flags = btrfs_extent_flags(leaf, ei);
2215         if (extent_op->update_flags) {
2216                 flags |= extent_op->flags_to_set;
2217                 btrfs_set_extent_flags(leaf, ei, flags);
2218         }
2219
2220         if (extent_op->update_key) {
2221                 struct btrfs_tree_block_info *bi;
2222                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2223                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2224                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2225         }
2226 }
2227
2228 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2229                                  struct btrfs_root *root,
2230                                  struct btrfs_delayed_ref_node *node,
2231                                  struct btrfs_delayed_extent_op *extent_op)
2232 {
2233         struct btrfs_key key;
2234         struct btrfs_path *path;
2235         struct btrfs_extent_item *ei;
2236         struct extent_buffer *leaf;
2237         u32 item_size;
2238         int ret;
2239         int err = 0;
2240         int metadata = !extent_op->is_data;
2241
2242         if (trans->aborted)
2243                 return 0;
2244
2245         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2246                 metadata = 0;
2247
2248         path = btrfs_alloc_path();
2249         if (!path)
2250                 return -ENOMEM;
2251
2252         key.objectid = node->bytenr;
2253
2254         if (metadata) {
2255                 key.type = BTRFS_METADATA_ITEM_KEY;
2256                 key.offset = extent_op->level;
2257         } else {
2258                 key.type = BTRFS_EXTENT_ITEM_KEY;
2259                 key.offset = node->num_bytes;
2260         }
2261
2262 again:
2263         path->reada = 1;
2264         path->leave_spinning = 1;
2265         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2266                                 path, 0, 1);
2267         if (ret < 0) {
2268                 err = ret;
2269                 goto out;
2270         }
2271         if (ret > 0) {
2272                 if (metadata) {
2273                         if (path->slots[0] > 0) {
2274                                 path->slots[0]--;
2275                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2276                                                       path->slots[0]);
2277                                 if (key.objectid == node->bytenr &&
2278                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2279                                     key.offset == node->num_bytes)
2280                                         ret = 0;
2281                         }
2282                         if (ret > 0) {
2283                                 btrfs_release_path(path);
2284                                 metadata = 0;
2285
2286                                 key.objectid = node->bytenr;
2287                                 key.offset = node->num_bytes;
2288                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2289                                 goto again;
2290                         }
2291                 } else {
2292                         err = -EIO;
2293                         goto out;
2294                 }
2295         }
2296
2297         leaf = path->nodes[0];
2298         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2299 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2300         if (item_size < sizeof(*ei)) {
2301                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2302                                              path, (u64)-1, 0);
2303                 if (ret < 0) {
2304                         err = ret;
2305                         goto out;
2306                 }
2307                 leaf = path->nodes[0];
2308                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309         }
2310 #endif
2311         BUG_ON(item_size < sizeof(*ei));
2312         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2313         __run_delayed_extent_op(extent_op, leaf, ei);
2314
2315         btrfs_mark_buffer_dirty(leaf);
2316 out:
2317         btrfs_free_path(path);
2318         return err;
2319 }
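/*
 * The lookup above mirrors how the two extent item flavours are keyed
 * (sketch):
 *
 *	skinny:	(bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *	fat:	(bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)
 *
 * A filesystem converted to skinny metadata can still hold old fat items,
 * which is why a miss on the skinny key releases the path, clears
 * 'metadata' and retries with the fat key.
 */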
2320
2321 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2322                                 struct btrfs_root *root,
2323                                 struct btrfs_delayed_ref_node *node,
2324                                 struct btrfs_delayed_extent_op *extent_op,
2325                                 int insert_reserved)
2326 {
2327         int ret = 0;
2328         struct btrfs_delayed_tree_ref *ref;
2329         struct btrfs_key ins;
2330         u64 parent = 0;
2331         u64 ref_root = 0;
2332         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2333                                                  SKINNY_METADATA);
2334
2335         ref = btrfs_delayed_node_to_tree_ref(node);
2336         trace_run_delayed_tree_ref(node, ref, node->action);
2337
2338         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2339                 parent = ref->parent;
2340         ref_root = ref->root;
2341
2342         ins.objectid = node->bytenr;
2343         if (skinny_metadata) {
2344                 ins.offset = ref->level;
2345                 ins.type = BTRFS_METADATA_ITEM_KEY;
2346         } else {
2347                 ins.offset = node->num_bytes;
2348                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2349         }
2350
2351         BUG_ON(node->ref_mod != 1);
2352         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2353                 BUG_ON(!extent_op || !extent_op->update_flags);
2354                 ret = alloc_reserved_tree_block(trans, root,
2355                                                 parent, ref_root,
2356                                                 extent_op->flags_to_set,
2357                                                 &extent_op->key,
2358                                                 ref->level, &ins,
2359                                                 node->no_quota);
2360         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2361                 ret = __btrfs_inc_extent_ref(trans, root, node,
2362                                              parent, ref_root,
2363                                              ref->level, 0, 1,
2364                                              extent_op);
2365         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2366                 ret = __btrfs_free_extent(trans, root, node,
2367                                           parent, ref_root,
2368                                           ref->level, 0, 1, extent_op);
2369         } else {
2370                 BUG();
2371         }
2372         return ret;
2373 }
2374
2375 /* helper function to actually process a single delayed ref entry */
2376 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2377                                struct btrfs_root *root,
2378                                struct btrfs_delayed_ref_node *node,
2379                                struct btrfs_delayed_extent_op *extent_op,
2380                                int insert_reserved)
2381 {
2382         int ret = 0;
2383
2384         if (trans->aborted) {
2385                 if (insert_reserved)
2386                         btrfs_pin_extent(root, node->bytenr,
2387                                          node->num_bytes, 1);
2388                 return 0;
2389         }
2390
2391         if (btrfs_delayed_ref_is_head(node)) {
2392                 struct btrfs_delayed_ref_head *head;
2393                 /*
2394                  * we've hit the end of the chain and we were supposed
2395                  * to insert this extent into the tree.  But, it got
2396                  * deleted before we ever needed to insert it, so all
2397                  * we have to do is clean up the accounting
2398                  */
2399                 BUG_ON(extent_op);
2400                 head = btrfs_delayed_node_to_head(node);
2401                 trace_run_delayed_ref_head(node, head, node->action);
2402
2403                 if (insert_reserved) {
2404                         btrfs_pin_extent(root, node->bytenr,
2405                                          node->num_bytes, 1);
2406                         if (head->is_data) {
2407                                 ret = btrfs_del_csums(trans, root,
2408                                                       node->bytenr,
2409                                                       node->num_bytes);
2410                         }
2411                 }
2412                 return ret;
2413         }
2414
2415         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2416             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2417                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2418                                            insert_reserved);
2419         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2420                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2421                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2422                                            insert_reserved);
2423         else
2424                 BUG();
2425         return ret;
2426 }
2427
2428 static inline struct btrfs_delayed_ref_node *
2429 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2430 {
2431         struct btrfs_delayed_ref_node *ref;
2432
2433         if (list_empty(&head->ref_list))
2434                 return NULL;
2435
2436         /*
2437          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2438          * This is to prevent a ref count from going down to zero, which deletes
2439          * the extent item from the extent tree, when there still are references
2440          * to add, which would fail because they would not find the extent item.
2441          */
2442         list_for_each_entry(ref, &head->ref_list, list) {
2443                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2444                         return ref;
2445         }
2446
2447         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2448                           list);
2449 }
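/*
 * Worked example of why ADD refs are picked first (illustrative
 * sequence): a head queued with [DROP -1, ADD +1] against an extent
 * whose on-disk count is 1.  Running the DROP first would hit zero and
 * delete the extent item, making the following ADD fail to find it.
 * Applying the +1 first keeps the count at 2, and the later -1 lands on
 * an item that still exists.
 */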
2450
2451 /*
2452  * Returns 0 on success or if called with an already aborted transaction.
2453  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2454  */
2455 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2456                                              struct btrfs_root *root,
2457                                              unsigned long nr)
2458 {
2459         struct btrfs_delayed_ref_root *delayed_refs;
2460         struct btrfs_delayed_ref_node *ref;
2461         struct btrfs_delayed_ref_head *locked_ref = NULL;
2462         struct btrfs_delayed_extent_op *extent_op;
2463         struct btrfs_fs_info *fs_info = root->fs_info;
2464         ktime_t start = ktime_get();
2465         int ret;
2466         unsigned long count = 0;
2467         unsigned long actual_count = 0;
2468         int must_insert_reserved = 0;
2469
2470         delayed_refs = &trans->transaction->delayed_refs;
2471         while (1) {
2472                 if (!locked_ref) {
2473                         if (count >= nr)
2474                                 break;
2475
2476                         spin_lock(&delayed_refs->lock);
2477                         locked_ref = btrfs_select_ref_head(trans);
2478                         if (!locked_ref) {
2479                                 spin_unlock(&delayed_refs->lock);
2480                                 break;
2481                         }
2482
2483                         /* grab the lock that says we are going to process
2484                          * all the refs for this head */
2485                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2486                         spin_unlock(&delayed_refs->lock);
2487                         /*
2488                          * we may have dropped the spin lock to get the head
2489                          * mutex lock, and that might have given someone else
2490                          * time to free the head.  If that's true, it has been
2491                          * removed from our list and we can move on.
2492                          */
2493                         if (ret == -EAGAIN) {
2494                                 locked_ref = NULL;
2495                                 count++;
2496                                 continue;
2497                         }
2498                 }
2499
2500                 spin_lock(&locked_ref->lock);
2501
2502                 /*
2503                  * locked_ref is the head node, so we have to go one
2504                  * node back for any delayed ref updates
2505                  */
2506                 ref = select_delayed_ref(locked_ref);
2507
2508                 if (ref && ref->seq &&
2509                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2510                         spin_unlock(&locked_ref->lock);
2511                         btrfs_delayed_ref_unlock(locked_ref);
2512                         spin_lock(&delayed_refs->lock);
2513                         locked_ref->processing = 0;
2514                         delayed_refs->num_heads_ready++;
2515                         spin_unlock(&delayed_refs->lock);
2516                         locked_ref = NULL;
2517                         cond_resched();
2518                         count++;
2519                         continue;
2520                 }
2521
2522                 /*
2523                  * record the must insert reserved flag before we
2524                  * drop the spin lock.
2525                  */
2526                 must_insert_reserved = locked_ref->must_insert_reserved;
2527                 locked_ref->must_insert_reserved = 0;
2528
2529                 extent_op = locked_ref->extent_op;
2530                 locked_ref->extent_op = NULL;
2531
2532                 if (!ref) {
2535                         /* All delayed refs have been processed, go ahead
2536                          * and send the head node to run_one_delayed_ref,
2537                          * so that any accounting fixes can happen
2538                          */
2539                         ref = &locked_ref->node;
2540
2541                         if (extent_op && must_insert_reserved) {
2542                                 btrfs_free_delayed_extent_op(extent_op);
2543                                 extent_op = NULL;
2544                         }
2545
2546                         if (extent_op) {
2547                                 spin_unlock(&locked_ref->lock);
2548                                 ret = run_delayed_extent_op(trans, root,
2549                                                             ref, extent_op);
2550                                 btrfs_free_delayed_extent_op(extent_op);
2551
2552                                 if (ret) {
2553                                         /*
2554                                          * Need to reset must_insert_reserved if
2555                                          * there was an error so the abort stuff
2556                                          * can cleanup the reserved space
2557                                          * properly.
2558                                          */
2559                                         if (must_insert_reserved)
2560                                                 locked_ref->must_insert_reserved = 1;
2561                                         locked_ref->processing = 0;
2562                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2563                                         btrfs_delayed_ref_unlock(locked_ref);
2564                                         return ret;
2565                                 }
2566                                 continue;
2567                         }
2568
2569                         /*
2570                          * Need to drop our head ref lock and re-acquire the
2571                          * delayed ref lock and then re-check to make sure
2572                          * nobody got added.
2573                          */
2574                         spin_unlock(&locked_ref->lock);
2575                         spin_lock(&delayed_refs->lock);
2576                         spin_lock(&locked_ref->lock);
2577                         if (!list_empty(&locked_ref->ref_list) ||
2578                             locked_ref->extent_op) {
2579                                 spin_unlock(&locked_ref->lock);
2580                                 spin_unlock(&delayed_refs->lock);
2581                                 continue;
2582                         }
2583                         ref->in_tree = 0;
2584                         delayed_refs->num_heads--;
2585                         rb_erase(&locked_ref->href_node,
2586                                  &delayed_refs->href_root);
2587                         spin_unlock(&delayed_refs->lock);
2588                 } else {
2589                         actual_count++;
2590                         ref->in_tree = 0;
2591                         list_del(&ref->list);
2592                 }
2593                 atomic_dec(&delayed_refs->num_entries);
2594
2595                 if (!btrfs_delayed_ref_is_head(ref)) {
2596                         /*
2597                          * when we play the delayed ref, also correct the
2598                          * ref_mod on head
2599                          */
2600                         switch (ref->action) {
2601                         case BTRFS_ADD_DELAYED_REF:
2602                         case BTRFS_ADD_DELAYED_EXTENT:
2603                                 locked_ref->node.ref_mod -= ref->ref_mod;
2604                                 break;
2605                         case BTRFS_DROP_DELAYED_REF:
2606                                 locked_ref->node.ref_mod += ref->ref_mod;
2607                                 break;
2608                         default:
2609                                 WARN_ON(1);
2610                         }
2611                 }
2612                 spin_unlock(&locked_ref->lock);
2613
2614                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2615                                           must_insert_reserved);
2616
2617                 btrfs_free_delayed_extent_op(extent_op);
2618                 if (ret) {
2619                         locked_ref->processing = 0;
2620                         btrfs_delayed_ref_unlock(locked_ref);
2621                         btrfs_put_delayed_ref(ref);
2622                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2623                         return ret;
2624                 }
2625
2626                 /*
2627                  * If this node is a head, that means all the refs in this head
2628                  * have been dealt with, and we will pick the next head to deal
2629                  * with, so we must unlock the head and drop it from the cluster
2630                  * list before we release it.
2631                  */
2632                 if (btrfs_delayed_ref_is_head(ref)) {
2633                         if (locked_ref->is_data &&
2634                             locked_ref->total_ref_mod < 0) {
2635                                 spin_lock(&delayed_refs->lock);
2636                                 delayed_refs->pending_csums -= ref->num_bytes;
2637                                 spin_unlock(&delayed_refs->lock);
2638                         }
2639                         btrfs_delayed_ref_unlock(locked_ref);
2640                         locked_ref = NULL;
2641                 }
2642                 btrfs_put_delayed_ref(ref);
2643                 count++;
2644                 cond_resched();
2645         }
2646
2647         /*
2648          * We don't want to include ref heads since we can have empty ref heads
2649          * and those will drastically skew our runtime down since we just do
2650          * accounting, no actual extent tree updates.
2651          */
2652         if (actual_count > 0) {
2653                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2654                 u64 avg;
2655
2656                 /*
2657                  * We weigh the current average higher than our current runtime
2658                  * to avoid large swings in the average.
2659                  */
2660                 spin_lock(&delayed_refs->lock);
2661                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2662                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2663                 spin_unlock(&delayed_refs->lock);
2664         }
2665         return 0;
2666 }
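
/*
 * Illustrative sketch, not part of the original source: the average kept
 * above is a simple exponential moving average, avg' = (3 * avg + runtime) / 4.
 * With assumed example values avg = 1,000,000ns and runtime = 2,000,000ns:
 *
 *	avg' = (3 * 1,000,000 + 2,000,000) >> 2 = 1,250,000ns
 *
 * so a single run can only pull the average a quarter of the way toward the
 * new sample, which damps large swings from one slow batch.
 */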
2667
2668 #ifdef SCRAMBLE_DELAYED_REFS
2669 /*
2670  * Normally delayed refs get processed in ascending bytenr order. This
2671  * correlates in most cases to the order added. To expose dependencies on this
2672  * order, we start to process the tree in the middle instead of the beginning.
2673  */
2674 static u64 find_middle(struct rb_root *root)
2675 {
2676         struct rb_node *n = root->rb_node;
2677         struct btrfs_delayed_ref_node *entry;
2678         int alt = 1;
2679         u64 middle;
2680         u64 first = 0, last = 0;
2681
2682         n = rb_first(root);
2683         if (n) {
2684                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2685                 first = entry->bytenr;
2686         }
2687         n = rb_last(root);
2688         if (n) {
2689                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2690                 last = entry->bytenr;
2691         }
2692         n = root->rb_node;
2693
2694         while (n) {
2695                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2696                 WARN_ON(!entry->in_tree);
2697
2698                 middle = entry->bytenr;
2699
2700                 if (alt)
2701                         n = n->rb_left;
2702                 else
2703                         n = n->rb_right;
2704
2705                 alt = 1 - alt;
2706         }
2707         return middle;
2708 }
2709 #endif
2710
2711 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2712 {
2713         u64 num_bytes;
2714
2715         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2716                              sizeof(struct btrfs_extent_inline_ref));
2717         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2718                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2719
2720         /*
2721          * We don't ever fill up leaves all the way, so the caller doubles this
2722          * estimate to get closer to what we're really going to want to use.
2723          */
2724         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2725 }
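
/*
 * Worked example for heads_to_leaves() (illustrative, struct sizes are
 * approximate assumptions): with SKINNY_METADATA each head costs
 * sizeof(struct btrfs_extent_item) + sizeof(struct btrfs_extent_inline_ref),
 * roughly 24 + 9 = 33 bytes.  1000 heads are then ~33,000 bytes, which with
 * a leaf data size of ~16k comes out to about 2 leaves, before the caller
 * doubles the estimate.
 */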
2726
2727 /*
2728  * Takes the number of bytes to be checksummed and figures out how many leaves it
2729  * would require to store the csums for that many bytes.
2730  */
2731 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2732 {
2733         u64 csum_size;
2734         u64 num_csums_per_leaf;
2735         u64 num_csums;
2736
2737         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2738         num_csums_per_leaf = div64_u64(csum_size,
2739                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2740         num_csums = div64_u64(csum_bytes, root->sectorsize);
2741         num_csums += num_csums_per_leaf - 1;
2742         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2743         return num_csums;
2744 }
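
/*
 * Worked example for btrfs_csum_bytes_to_leaves() (illustrative, assuming
 * 4 byte crc32c checksums, a 4k sectorsize and ~16k leaves): one leaf holds
 * roughly (16k - sizeof(struct btrfs_item)) / 4 ~= 4000 csums, and 1GiB of
 * data needs 1GiB / 4k = 262,144 csums, so the round-up division above
 * yields about 66 leaves.
 */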
2745
2746 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2747                                        struct btrfs_root *root)
2748 {
2749         struct btrfs_block_rsv *global_rsv;
2750         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2751         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2752         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2753         u64 num_bytes, num_dirty_bgs_bytes;
2754         int ret = 0;
2755
2756         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2757         num_heads = heads_to_leaves(root, num_heads);
2758         if (num_heads > 1)
2759                 num_bytes += (num_heads - 1) * root->nodesize;
2760         num_bytes <<= 1;
2761         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2762         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2763                                                              num_dirty_bgs);
2764         global_rsv = &root->fs_info->global_block_rsv;
2765
2766         /*
2767          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2768          * wiggle room since running delayed refs can create more delayed refs.
2769          */
2770         if (global_rsv->space_info->full) {
2771                 num_dirty_bgs_bytes <<= 1;
2772                 num_bytes <<= 1;
2773         }
2774
2775         spin_lock(&global_rsv->lock);
2776         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2777                 ret = 1;
2778         spin_unlock(&global_rsv->lock);
2779         return ret;
2780 }
2781
2782 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2783                                        struct btrfs_root *root)
2784 {
2785         struct btrfs_fs_info *fs_info = root->fs_info;
2786         u64 num_entries =
2787                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2788         u64 avg_runtime;
2789         u64 val;
2790
2791         smp_mb();
2792         avg_runtime = fs_info->avg_delayed_ref_runtime;
2793         val = num_entries * avg_runtime;
2794         if (val >= NSEC_PER_SEC)
2795                 return 1;
2796         if (val >= NSEC_PER_SEC / 2)
2797                 return 2;
2798
2799         return btrfs_check_space_for_delayed_refs(trans, root);
2800 }
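
/*
 * Illustrative numbers for the thresholds above (assumptions, not
 * measurements): with an average of 10,000ns per delayed ref, ~100,000
 * pending entries put the estimated flush time at a full second and we
 * return 1; ~50,000 entries cross the half-second mark and we return 2.
 * Below that, the decision falls through to the global reservation check.
 */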
2801
2802 struct async_delayed_refs {
2803         struct btrfs_root *root;
2804         int count;
2805         int error;
2806         int sync;
2807         struct completion wait;
2808         struct btrfs_work work;
2809 };
2810
2811 static void delayed_ref_async_start(struct btrfs_work *work)
2812 {
2813         struct async_delayed_refs *async;
2814         struct btrfs_trans_handle *trans;
2815         int ret;
2816
2817         async = container_of(work, struct async_delayed_refs, work);
2818
2819         trans = btrfs_join_transaction(async->root);
2820         if (IS_ERR(trans)) {
2821                 async->error = PTR_ERR(trans);
2822                 goto done;
2823         }
2824
2825         /*
2826          * trans->sync means that when we call end_transaction, we won't
2827          * wait on delayed refs
2828          */
2829         trans->sync = true;
2830         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2831         if (ret)
2832                 async->error = ret;
2833
2834         ret = btrfs_end_transaction(trans, async->root);
2835         if (ret && !async->error)
2836                 async->error = ret;
2837 done:
2838         if (async->sync)
2839                 complete(&async->wait);
2840         else
2841                 kfree(async);
2842 }
2843
2844 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2845                                  unsigned long count, int wait)
2846 {
2847         struct async_delayed_refs *async;
2848         int ret;
2849
2850         async = kmalloc(sizeof(*async), GFP_NOFS);
2851         if (!async)
2852                 return -ENOMEM;
2853
2854         async->root = root->fs_info->tree_root;
2855         async->count = count;
2856         async->error = 0;
2857         if (wait)
2858                 async->sync = 1;
2859         else
2860                 async->sync = 0;
2861         init_completion(&async->wait);
2862
2863         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2864                         delayed_ref_async_start, NULL, NULL);
2865
2866         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2867
2868         if (wait) {
2869                 wait_for_completion(&async->wait);
2870                 ret = async->error;
2871                 kfree(async);
2872                 return ret;
2873         }
2874         return 0;
2875 }
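
/*
 * Usage sketch for btrfs_async_run_delayed_refs() (illustrative): a caller
 * that needs the outcome passes wait == 1 and blocks on the completion, e.g.
 *
 *	ret = btrfs_async_run_delayed_refs(root, 32, 1);
 *
 * A fire-and-forget caller passes wait == 0, in which case the worker frees
 * the async struct itself and any error is silently dropped.
 */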
2876
2877 /*
2878  * this starts processing the delayed reference count updates and
2879  * extent insertions we have queued up so far.  count can be
2880  * 0, which means to process everything in the tree at the start
2881  * of the run (but not newly added entries), it can be a target
2882  * number you'd like to process, or (unsigned long)-1 to run them all.
2883  *
2884  * Returns 0 on success or if called with an aborted transaction
2885  * Returns <0 on error and aborts the transaction
2886  */
2887 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2888                            struct btrfs_root *root, unsigned long count)
2889 {
2890         struct rb_node *node;
2891         struct btrfs_delayed_ref_root *delayed_refs;
2892         struct btrfs_delayed_ref_head *head;
2893         int ret;
2894         int run_all = count == (unsigned long)-1;
2895         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2896
2897         /* We'll clean this up in btrfs_cleanup_transaction */
2898         if (trans->aborted)
2899                 return 0;
2900
2901         if (root == root->fs_info->extent_root)
2902                 root = root->fs_info->tree_root;
2903
2904         delayed_refs = &trans->transaction->delayed_refs;
2905         if (count == 0)
2906                 count = atomic_read(&delayed_refs->num_entries) * 2;
2907
2908 again:
2909 #ifdef SCRAMBLE_DELAYED_REFS
2910         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2911 #endif
2912         trans->can_flush_pending_bgs = false;
2913         ret = __btrfs_run_delayed_refs(trans, root, count);
2914         if (ret < 0) {
2915                 btrfs_abort_transaction(trans, root, ret);
2916                 return ret;
2917         }
2918
2919         if (run_all) {
2920                 if (!list_empty(&trans->new_bgs))
2921                         btrfs_create_pending_block_groups(trans, root);
2922
2923                 spin_lock(&delayed_refs->lock);
2924                 node = rb_first(&delayed_refs->href_root);
2925                 if (!node) {
2926                         spin_unlock(&delayed_refs->lock);
2927                         goto out;
2928                 }
2929                 count = (unsigned long)-1;
2930
2931                 while (node) {
2932                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2933                                         href_node);
2934                         if (btrfs_delayed_ref_is_head(&head->node)) {
2935                                 struct btrfs_delayed_ref_node *ref;
2936
2937                                 ref = &head->node;
2938                                 atomic_inc(&ref->refs);
2939
2940                                 spin_unlock(&delayed_refs->lock);
2941                                 /*
2942                                  * Mutex was contended, block until it's
2943                                  * released and try again
2944                                  */
2945                                 mutex_lock(&head->mutex);
2946                                 mutex_unlock(&head->mutex);
2947
2948                                 btrfs_put_delayed_ref(ref);
2949                                 cond_resched();
2950                                 goto again;
2951                         } else {
2952                                 WARN_ON(1);
2953                         }
2954                         node = rb_next(node);
2955                 }
2956                 spin_unlock(&delayed_refs->lock);
2957                 cond_resched();
2958                 goto again;
2959         }
2960 out:
2961         assert_qgroups_uptodate(trans);
2962         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2963         return 0;
2964 }
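
/*
 * Usage sketch (illustrative): commit-time callers drain the whole tree,
 * including entries added while running, with
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * while passing 0 only aims at roughly the entries that existed when the
 * run started (count is set to num_entries * 2 above).
 */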
2965
2966 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2967                                 struct btrfs_root *root,
2968                                 u64 bytenr, u64 num_bytes, u64 flags,
2969                                 int level, int is_data)
2970 {
2971         struct btrfs_delayed_extent_op *extent_op;
2972         int ret;
2973
2974         extent_op = btrfs_alloc_delayed_extent_op();
2975         if (!extent_op)
2976                 return -ENOMEM;
2977
2978         extent_op->flags_to_set = flags;
2979         extent_op->update_flags = 1;
2980         extent_op->update_key = 0;
2981         extent_op->is_data = is_data ? 1 : 0;
2982         extent_op->level = level;
2983
2984         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2985                                           num_bytes, extent_op);
2986         if (ret)
2987                 btrfs_free_delayed_extent_op(extent_op);
2988         return ret;
2989 }
2990
2991 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2992                                       struct btrfs_root *root,
2993                                       struct btrfs_path *path,
2994                                       u64 objectid, u64 offset, u64 bytenr)
2995 {
2996         struct btrfs_delayed_ref_head *head;
2997         struct btrfs_delayed_ref_node *ref;
2998         struct btrfs_delayed_data_ref *data_ref;
2999         struct btrfs_delayed_ref_root *delayed_refs;
3000         int ret = 0;
3001
3002         delayed_refs = &trans->transaction->delayed_refs;
3003         spin_lock(&delayed_refs->lock);
3004         head = btrfs_find_delayed_ref_head(trans, bytenr);
3005         if (!head) {
3006                 spin_unlock(&delayed_refs->lock);
3007                 return 0;
3008         }
3009
3010         if (!mutex_trylock(&head->mutex)) {
3011                 atomic_inc(&head->node.refs);
3012                 spin_unlock(&delayed_refs->lock);
3013
3014                 btrfs_release_path(path);
3015
3016                 /*
3017                  * Mutex was contended, block until it's released and let
3018                  * caller try again
3019                  */
3020                 mutex_lock(&head->mutex);
3021                 mutex_unlock(&head->mutex);
3022                 btrfs_put_delayed_ref(&head->node);
3023                 return -EAGAIN;
3024         }
3025         spin_unlock(&delayed_refs->lock);
3026
3027         spin_lock(&head->lock);
3028         list_for_each_entry(ref, &head->ref_list, list) {
3029                 /* If it's a shared ref we know a cross reference exists */
3030                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3031                         ret = 1;
3032                         break;
3033                 }
3034
3035                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3036
3037                 /*
3038                  * If our ref doesn't match the one we're currently looking at
3039                  * then we have a cross reference.
3040                  */
3041                 if (data_ref->root != root->root_key.objectid ||
3042                     data_ref->objectid != objectid ||
3043                     data_ref->offset != offset) {
3044                         ret = 1;
3045                         break;
3046                 }
3047         }
3048         spin_unlock(&head->lock);
3049         mutex_unlock(&head->mutex);
3050         return ret;
3051 }
3052
3053 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3054                                         struct btrfs_root *root,
3055                                         struct btrfs_path *path,
3056                                         u64 objectid, u64 offset, u64 bytenr)
3057 {
3058         struct btrfs_root *extent_root = root->fs_info->extent_root;
3059         struct extent_buffer *leaf;
3060         struct btrfs_extent_data_ref *ref;
3061         struct btrfs_extent_inline_ref *iref;
3062         struct btrfs_extent_item *ei;
3063         struct btrfs_key key;
3064         u32 item_size;
3065         int ret;
3066
3067         key.objectid = bytenr;
3068         key.offset = (u64)-1;
3069         key.type = BTRFS_EXTENT_ITEM_KEY;
3070
3071         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3072         if (ret < 0)
3073                 goto out;
3074         BUG_ON(ret == 0); /* Corruption */
3075
3076         ret = -ENOENT;
3077         if (path->slots[0] == 0)
3078                 goto out;
3079
3080         path->slots[0]--;
3081         leaf = path->nodes[0];
3082         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3083
3084         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3085                 goto out;
3086
3087         ret = 1;
3088         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3089 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3090         if (item_size < sizeof(*ei)) {
3091                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3092                 goto out;
3093         }
3094 #endif
3095         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3096
3097         if (item_size != sizeof(*ei) +
3098             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3099                 goto out;
3100
3101         if (btrfs_extent_generation(leaf, ei) <=
3102             btrfs_root_last_snapshot(&root->root_item))
3103                 goto out;
3104
3105         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3106         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3107             BTRFS_EXTENT_DATA_REF_KEY)
3108                 goto out;
3109
3110         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3111         if (btrfs_extent_refs(leaf, ei) !=
3112             btrfs_extent_data_ref_count(leaf, ref) ||
3113             btrfs_extent_data_ref_root(leaf, ref) !=
3114             root->root_key.objectid ||
3115             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3116             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3117                 goto out;
3118
3119         ret = 0;
3120 out:
3121         return ret;
3122 }
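
/*
 * Note restating the return contract of check_committed_ref() above: 0 means
 * the committed tree shows exactly one inline EXTENT_DATA_REF matching this
 * root/objectid/offset, with all refs accounted for and a generation newer
 * than the root's last snapshot, so no cross reference can exist there;
 * -ENOENT means no matching extent item was found; 1 means anything more
 * complicated, which callers must treat as a possible cross reference.
 */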
3123
3124 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3125                           struct btrfs_root *root,
3126                           u64 objectid, u64 offset, u64 bytenr)
3127 {
3128         struct btrfs_path *path;
3129         int ret;
3130         int ret2;
3131
3132         path = btrfs_alloc_path();
3133         if (!path)
3134                 return -ENOMEM;
3135
3136         do {
3137                 ret = check_committed_ref(trans, root, path, objectid,
3138                                           offset, bytenr);
3139                 if (ret && ret != -ENOENT)
3140                         goto out;
3141
3142                 ret2 = check_delayed_ref(trans, root, path, objectid,
3143                                          offset, bytenr);
3144         } while (ret2 == -EAGAIN);
3145
3146         if (ret2 && ret2 != -ENOENT) {
3147                 ret = ret2;
3148                 goto out;
3149         }
3150
3151         if (ret != -ENOENT || ret2 != -ENOENT)
3152                 ret = 0;
3153 out:
3154         btrfs_free_path(path);
3155         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3156                 WARN_ON(ret > 0);
3157         return ret;
3158 }
3159
3160 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3161                            struct btrfs_root *root,
3162                            struct extent_buffer *buf,
3163                            int full_backref, int inc)
3164 {
3165         u64 bytenr;
3166         u64 num_bytes;
3167         u64 parent;
3168         u64 ref_root;
3169         u32 nritems;
3170         struct btrfs_key key;
3171         struct btrfs_file_extent_item *fi;
3172         int i;
3173         int level;
3174         int ret = 0;
3175         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3176                             u64, u64, u64, u64, u64, u64, int);
3177
3178
3179         if (btrfs_test_is_dummy_root(root))
3180                 return 0;
3181
3182         ref_root = btrfs_header_owner(buf);
3183         nritems = btrfs_header_nritems(buf);
3184         level = btrfs_header_level(buf);
3185
3186         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3187                 return 0;
3188
3189         if (inc)
3190                 process_func = btrfs_inc_extent_ref;
3191         else
3192                 process_func = btrfs_free_extent;
3193
3194         if (full_backref)
3195                 parent = buf->start;
3196         else
3197                 parent = 0;
3198
3199         for (i = 0; i < nritems; i++) {
3200                 if (level == 0) {
3201                         btrfs_item_key_to_cpu(buf, &key, i);
3202                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3203                                 continue;
3204                         fi = btrfs_item_ptr(buf, i,
3205                                             struct btrfs_file_extent_item);
3206                         if (btrfs_file_extent_type(buf, fi) ==
3207                             BTRFS_FILE_EXTENT_INLINE)
3208                                 continue;
3209                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3210                         if (bytenr == 0)
3211                                 continue;
3212
3213                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3214                         key.offset -= btrfs_file_extent_offset(buf, fi);
3215                         ret = process_func(trans, root, bytenr, num_bytes,
3216                                            parent, ref_root, key.objectid,
3217                                            key.offset, 1);
3218                         if (ret)
3219                                 goto fail;
3220                 } else {
3221                         bytenr = btrfs_node_blockptr(buf, i);
3222                         num_bytes = root->nodesize;
3223                         ret = process_func(trans, root, bytenr, num_bytes,
3224                                            parent, ref_root, level - 1, 0,
3225                                            1);
3226                         if (ret)
3227                                 goto fail;
3228                 }
3229         }
3230         return 0;
3231 fail:
3232         return ret;
3233 }
3234
3235 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3236                   struct extent_buffer *buf, int full_backref)
3237 {
3238         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3239 }
3240
3241 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3242                   struct extent_buffer *buf, int full_backref)
3243 {
3244         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3245 }
3246
3247 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3248                                  struct btrfs_root *root,
3249                                  struct btrfs_path *path,
3250                                  struct btrfs_block_group_cache *cache)
3251 {
3252         int ret;
3253         struct btrfs_root *extent_root = root->fs_info->extent_root;
3254         unsigned long bi;
3255         struct extent_buffer *leaf;
3256
3257         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3258         if (ret) {
3259                 if (ret > 0)
3260                         ret = -ENOENT;
3261                 goto fail;
3262         }
3263
3264         leaf = path->nodes[0];
3265         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3266         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3267         btrfs_mark_buffer_dirty(leaf);
3268 fail:
3269         btrfs_release_path(path);
3270         return ret;
3271
3272 }
3273
3274 static struct btrfs_block_group_cache *
3275 next_block_group(struct btrfs_root *root,
3276                  struct btrfs_block_group_cache *cache)
3277 {
3278         struct rb_node *node;
3279
3280         spin_lock(&root->fs_info->block_group_cache_lock);
3281
3282         /* If our block group was removed, we need a full search. */
3283         if (RB_EMPTY_NODE(&cache->cache_node)) {
3284                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3285
3286                 spin_unlock(&root->fs_info->block_group_cache_lock);
3287                 btrfs_put_block_group(cache);
3288                 cache = btrfs_lookup_first_block_group(root->fs_info,
3289                                                        next_bytenr);
3290                 return cache;
3291         }
3292         node = rb_next(&cache->cache_node);
3293         btrfs_put_block_group(cache);
3294         if (node) {
3295                 cache = rb_entry(node, struct btrfs_block_group_cache,
3296                                  cache_node);
3297                 btrfs_get_block_group(cache);
3298         } else
3299                 cache = NULL;
3300         spin_unlock(&root->fs_info->block_group_cache_lock);
3301         return cache;
3302 }
3303
3304 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3305                             struct btrfs_trans_handle *trans,
3306                             struct btrfs_path *path)
3307 {
3308         struct btrfs_root *root = block_group->fs_info->tree_root;
3309         struct inode *inode = NULL;
3310         u64 alloc_hint = 0;
3311         int dcs = BTRFS_DC_ERROR;
3312         u64 num_pages = 0;
3313         int retries = 0;
3314         int ret = 0;
3315
3316         /*
3317          * If this block group is smaller than 100 megs, don't bother caching the
3318          * block group.
3319          */
3320         if (block_group->key.offset < (100 * 1024 * 1024)) {
3321                 spin_lock(&block_group->lock);
3322                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3323                 spin_unlock(&block_group->lock);
3324                 return 0;
3325         }
3326
3327         if (trans->aborted)
3328                 return 0;
3329 again:
3330         inode = lookup_free_space_inode(root, block_group, path);
3331         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3332                 ret = PTR_ERR(inode);
3333                 btrfs_release_path(path);
3334                 goto out;
3335         }
3336
3337         if (IS_ERR(inode)) {
3338                 BUG_ON(retries);
3339                 retries++;
3340
3341                 if (block_group->ro)
3342                         goto out_free;
3343
3344                 ret = create_free_space_inode(root, trans, block_group, path);
3345                 if (ret)
3346                         goto out_free;
3347                 goto again;
3348         }
3349
3350         /* We've already set up this transaction, go ahead and exit */
3351         if (block_group->cache_generation == trans->transid &&
3352             i_size_read(inode)) {
3353                 dcs = BTRFS_DC_SETUP;
3354                 goto out_put;
3355         }
3356
3357         /*
3358          * We want to set the generation to 0, that way if anything goes wrong
3359          * from here on out we know not to trust this cache when we load up next
3360          * time.
3361          */
3362         BTRFS_I(inode)->generation = 0;
3363         ret = btrfs_update_inode(trans, root, inode);
3364         if (ret) {
3365                 /*
3366                  * Theoretically we could recover from this by simply setting the
3367                  * super cache generation to 0 so we know to invalidate the
3368                  * cache, but then we'd have to keep track of the block groups
3369                  * that fail this way so we know we _have_ to reset this cache
3370                  * before the next commit or risk reading stale cache.  So to
3371                  * limit our exposure to horrible edge cases, let's just abort the
3372                  * transaction; this only happens in really bad situations
3373                  * anyway.
3374                  */
3375                 btrfs_abort_transaction(trans, root, ret);
3376                 goto out_put;
3377         }
3379
3380         if (i_size_read(inode) > 0) {
3381                 ret = btrfs_check_trunc_cache_free_space(root,
3382                                         &root->fs_info->global_block_rsv);
3383                 if (ret)
3384                         goto out_put;
3385
3386                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3387                 if (ret)
3388                         goto out_put;
3389         }
3390
3391         spin_lock(&block_group->lock);
3392         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3393             !btrfs_test_opt(root, SPACE_CACHE)) {
3394                 /*
3395                  * don't bother trying to write stuff out _if_
3396                  * a) we're not cached,
3397                  * b) we're mounted with the nospace_cache option.
3398                  */
3399                 dcs = BTRFS_DC_WRITTEN;
3400                 spin_unlock(&block_group->lock);
3401                 goto out_put;
3402         }
3403         spin_unlock(&block_group->lock);
3404
3405         /*
3406          * Try to preallocate enough space based on how big the block group is.
3407          * Keep in mind this has to include any pinned space which could end up
3408          * taking up quite a bit since it's not folded into the other space
3409          * cache.
3410          */
3411         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3412         if (!num_pages)
3413                 num_pages = 1;
3414
3415         num_pages *= 16;
3416         num_pages *= PAGE_CACHE_SIZE;
3417
3418         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3419         if (ret)
3420                 goto out_put;
3421
3422         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3423                                               num_pages, num_pages,
3424                                               &alloc_hint);
3425         if (!ret)
3426                 dcs = BTRFS_DC_SETUP;
3427         btrfs_free_reserved_data_space(inode, num_pages);
3428
3429 out_put:
3430         iput(inode);
3431 out_free:
3432         btrfs_release_path(path);
3433 out:
3434         spin_lock(&block_group->lock);
3435         if (!ret && dcs == BTRFS_DC_SETUP)
3436                 block_group->cache_generation = trans->transid;
3437         block_group->disk_cache_state = dcs;
3438         spin_unlock(&block_group->lock);
3439
3440         return ret;
3441 }
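
/*
 * Worked example of the sizing in cache_save_setup() (illustrative, assuming
 * 4k pages): a 1GiB block group gives div_u64(1GiB, 256MiB) = 4, then
 * 4 * 16 = 64 pages, i.e. 64 * 4k = 256k preallocated for its free space
 * cache.
 */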
3442
3443 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3444                             struct btrfs_root *root)
3445 {
3446         struct btrfs_block_group_cache *cache, *tmp;
3447         struct btrfs_transaction *cur_trans = trans->transaction;
3448         struct btrfs_path *path;
3449
3450         if (list_empty(&cur_trans->dirty_bgs) ||
3451             !btrfs_test_opt(root, SPACE_CACHE))
3452                 return 0;
3453
3454         path = btrfs_alloc_path();
3455         if (!path)
3456                 return -ENOMEM;
3457
3458         /* Could add new block groups, use _safe just in case */
3459         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3460                                  dirty_list) {
3461                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3462                         cache_save_setup(cache, trans, path);
3463         }
3464
3465         btrfs_free_path(path);
3466         return 0;
3467 }
3468
3469 /*
3470  * transaction commit does final block group cache writeback during a
3471  * critical section where nothing is allowed to change the FS.  This is
3472  * required in order for the cache to actually match the block group,
3473  * but can introduce a lot of latency into the commit.
3474  *
3475  * So, btrfs_start_dirty_block_groups is here to kick off block group
3476  * cache IO.  There's a chance we'll have to redo some of it if the
3477  * block group changes again during the commit, but it greatly reduces
3478  * the commit latency by getting rid of the easy block groups while
3479  * we're still allowing others to join the commit.
3480  */
3481 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3482                                    struct btrfs_root *root)
3483 {
3484         struct btrfs_block_group_cache *cache;
3485         struct btrfs_transaction *cur_trans = trans->transaction;
3486         int ret = 0;
3487         int should_put;
3488         struct btrfs_path *path = NULL;
3489         LIST_HEAD(dirty);
3490         struct list_head *io = &cur_trans->io_bgs;
3491         int num_started = 0;
3492         int loops = 0;
3493
3494         spin_lock(&cur_trans->dirty_bgs_lock);
3495         if (list_empty(&cur_trans->dirty_bgs)) {
3496                 spin_unlock(&cur_trans->dirty_bgs_lock);
3497                 return 0;
3498         }
3499         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3500         spin_unlock(&cur_trans->dirty_bgs_lock);
3501
3502 again:
3503         /*
3504          * make sure all the block groups on our dirty list actually
3505          * exist
3506          */
3507         btrfs_create_pending_block_groups(trans, root);
3508
3509         if (!path) {
3510                 path = btrfs_alloc_path();
3511                 if (!path)
3512                         return -ENOMEM;
3513         }
3514
3515         /*
3516          * cache_write_mutex is here only to save us from balance or automatic
3517          * removal of empty block groups deleting this block group while we are
3518          * writing out the cache
3519          */
3520         mutex_lock(&trans->transaction->cache_write_mutex);
3521         while (!list_empty(&dirty)) {
3522                 cache = list_first_entry(&dirty,
3523                                          struct btrfs_block_group_cache,
3524                                          dirty_list);
3525                 /*
3526                  * this can happen if something re-dirties a block
3527                  * group that is already under IO.  Just wait for it to
3528                  * finish and then do it all again
3529                  */
3530                 if (!list_empty(&cache->io_list)) {
3531                         list_del_init(&cache->io_list);
3532                         btrfs_wait_cache_io(root, trans, cache,
3533                                             &cache->io_ctl, path,
3534                                             cache->key.objectid);
3535                         btrfs_put_block_group(cache);
3536                 }
3537
3538
3539                 /*
3540                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3541                  * if it should update the cache_state.  Don't delete
3542                  * until after we wait.
3543                  *
3544                  * Since we're not running in the commit critical section
3545                  * we need the dirty_bgs_lock to protect from update_block_group
3546                  */
3547                 spin_lock(&cur_trans->dirty_bgs_lock);
3548                 list_del_init(&cache->dirty_list);
3549                 spin_unlock(&cur_trans->dirty_bgs_lock);
3550
3551                 should_put = 1;
3552
3553                 cache_save_setup(cache, trans, path);
3554
3555                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3556                         cache->io_ctl.inode = NULL;
3557                         ret = btrfs_write_out_cache(root, trans, cache, path);
3558                         if (ret == 0 && cache->io_ctl.inode) {
3559                                 num_started++;
3560                                 should_put = 0;
3561
3562                                 /*
3563                                  * the cache_write_mutex is protecting
3564                                  * the io_list
3565                                  */
3566                                 list_add_tail(&cache->io_list, io);
3567                         } else {
3568                                 /*
3569                                  * if we failed to write the cache, the
3570                                  * generation will be bad and life goes on
3571                                  */
3572                                 ret = 0;
3573                         }
3574                 }
3575                 if (!ret) {
3576                         ret = write_one_cache_group(trans, root, path, cache);
3577                         /*
3578                          * Our block group might still be attached to the list
3579                          * of new block groups in the transaction handle of some
3580                          * other task (struct btrfs_trans_handle->new_bgs). This
3581                          * means its block group item isn't yet in the extent
3582                          * tree. If this happens ignore the error, as we will
3583                          * try again later in the critical section of the
3584                          * transaction commit.
3585                          */
3586                         if (ret == -ENOENT) {
3587                                 ret = 0;
3588                                 spin_lock(&cur_trans->dirty_bgs_lock);
3589                                 if (list_empty(&cache->dirty_list)) {
3590                                         list_add_tail(&cache->dirty_list,
3591                                                       &cur_trans->dirty_bgs);
3592                                         btrfs_get_block_group(cache);
3593                                 }
3594                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3595                         } else if (ret) {
3596                                 btrfs_abort_transaction(trans, root, ret);
3597                         }
3598                 }
3599
3600                 /* if it's not on the io list, we need to put the block group */
3601                 if (should_put)
3602                         btrfs_put_block_group(cache);
3603
3604                 if (ret)
3605                         break;
3606
3607                 /*
3608                  * Avoid blocking other tasks for too long. It might even save
3609                  * us from writing caches for block groups that are going to be
3610                  * removed.
3611                  */
3612                 mutex_unlock(&trans->transaction->cache_write_mutex);
3613                 mutex_lock(&trans->transaction->cache_write_mutex);
3614         }
3615         mutex_unlock(&trans->transaction->cache_write_mutex);
3616
3617         /*
3618          * go through delayed refs for all the stuff we've just kicked off
3619          * and then loop back (just once)
3620          */
3621         ret = btrfs_run_delayed_refs(trans, root, 0);
3622         if (!ret && loops == 0) {
3623                 loops++;
3624                 spin_lock(&cur_trans->dirty_bgs_lock);
3625                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3626                 /*
3627                  * dirty_bgs_lock protects us from concurrent block group
3628                  * deletes too (not just cache_write_mutex).
3629                  */
3630                 if (!list_empty(&dirty)) {
3631                         spin_unlock(&cur_trans->dirty_bgs_lock);
3632                         goto again;
3633                 }
3634                 spin_unlock(&cur_trans->dirty_bgs_lock);
3635         }
3636
3637         btrfs_free_path(path);
3638         return ret;
3639 }
3640
3641 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3642                                    struct btrfs_root *root)
3643 {
3644         struct btrfs_block_group_cache *cache;
3645         struct btrfs_transaction *cur_trans = trans->transaction;
3646         int ret = 0;
3647         int should_put;
3648         struct btrfs_path *path;
3649         struct list_head *io = &cur_trans->io_bgs;
3650         int num_started = 0;
3651
3652         path = btrfs_alloc_path();
3653         if (!path)
3654                 return -ENOMEM;
3655
3656         /*
3657          * We don't need the lock here since we are protected by the transaction
3658          * commit.  We want to do the cache_save_setup first and then run the
3659          * delayed refs to make sure we have the best chance at doing this all
3660          * in one shot.
3661          */
3662         while (!list_empty(&cur_trans->dirty_bgs)) {
3663                 cache = list_first_entry(&cur_trans->dirty_bgs,
3664                                          struct btrfs_block_group_cache,
3665                                          dirty_list);
3666
3667                 /*
3668                  * this can happen if cache_save_setup re-dirties a block
3669                  * group that is already under IO.  Just wait for it to
3670                  * finish and then do it all again
3671                  */
3672                 if (!list_empty(&cache->io_list)) {
3673                         list_del_init(&cache->io_list);
3674                         btrfs_wait_cache_io(root, trans, cache,
3675                                             &cache->io_ctl, path,
3676                                             cache->key.objectid);
3677                         btrfs_put_block_group(cache);
3678                 }
3679
3680                 /*
3681                  * don't remove from the dirty list until after we've waited
3682                  * on any pending IO
3683                  */
3684                 list_del_init(&cache->dirty_list);
3685                 should_put = 1;
3686
3687                 cache_save_setup(cache, trans, path);
3688
3689                 if (!ret)
3690                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3691
3692                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3693                         cache->io_ctl.inode = NULL;
3694                         ret = btrfs_write_out_cache(root, trans, cache, path);
3695                         if (ret == 0 && cache->io_ctl.inode) {
3696                                 num_started++;
3697                                 should_put = 0;
3698                                 list_add_tail(&cache->io_list, io);
3699                         } else {
3700                                 /*
3701                                  * if we failed to write the cache, the
3702                                  * generation will be bad and life goes on
3703                                  */
3704                                 ret = 0;
3705                         }
3706                 }
3707                 if (!ret) {
3708                         ret = write_one_cache_group(trans, root, path, cache);
3709                         if (ret)
3710                                 btrfs_abort_transaction(trans, root, ret);
3711                 }
3712
3713                 /* if it's not on the io list, we need to put the block group */
3714                 if (should_put)
3715                         btrfs_put_block_group(cache);
3716         }
3717
3718         while (!list_empty(io)) {
3719                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3720                                          io_list);
3721                 list_del_init(&cache->io_list);
3722                 btrfs_wait_cache_io(root, trans, cache,
3723                                     &cache->io_ctl, path, cache->key.objectid);
3724                 btrfs_put_block_group(cache);
3725         }
3726
3727         btrfs_free_path(path);
3728         return ret;
3729 }
3730
3731 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3732 {
3733         struct btrfs_block_group_cache *block_group;
3734         int readonly = 0;
3735
3736         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3737         if (!block_group || block_group->ro)
3738                 readonly = 1;
3739         if (block_group)
3740                 btrfs_put_block_group(block_group);
3741         return readonly;
3742 }
3743
3744 static const char *alloc_name(u64 flags)
3745 {
3746         switch (flags) {
3747         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3748                 return "mixed";
3749         case BTRFS_BLOCK_GROUP_METADATA:
3750                 return "metadata";
3751         case BTRFS_BLOCK_GROUP_DATA:
3752                 return "data";
3753         case BTRFS_BLOCK_GROUP_SYSTEM:
3754                 return "system";
3755         default:
3756                 WARN_ON(1);
3757                 return "invalid-combination";
3758         }
3759 }
3760
3761 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3762                              u64 total_bytes, u64 bytes_used,
3763                              struct btrfs_space_info **space_info)
3764 {
3765         struct btrfs_space_info *found;
3766         int i;
3767         int factor;
3768         int ret;
3769
3770         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3771                      BTRFS_BLOCK_GROUP_RAID10))
3772                 factor = 2;
3773         else
3774                 factor = 1;
3775
3776         found = __find_space_info(info, flags);
3777         if (found) {
3778                 spin_lock(&found->lock);
3779                 found->total_bytes += total_bytes;
3780                 found->disk_total += total_bytes * factor;
3781                 found->bytes_used += bytes_used;
3782                 found->disk_used += bytes_used * factor;
3783                 if (total_bytes > 0)
3784                         found->full = 0;
3785                 spin_unlock(&found->lock);
3786                 *space_info = found;
3787                 return 0;
3788         }
3789         found = kzalloc(sizeof(*found), GFP_NOFS);
3790         if (!found)
3791                 return -ENOMEM;
3792
3793         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3794         if (ret) {
3795                 kfree(found);
3796                 return ret;
3797         }
3798
3799         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3800                 INIT_LIST_HEAD(&found->block_groups[i]);
3801         init_rwsem(&found->groups_sem);
3802         spin_lock_init(&found->lock);
3803         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3804         found->total_bytes = total_bytes;
3805         found->disk_total = total_bytes * factor;
3806         found->bytes_used = bytes_used;
3807         found->disk_used = bytes_used * factor;
3808         found->bytes_pinned = 0;
3809         found->bytes_reserved = 0;
3810         found->bytes_readonly = 0;
3811         found->bytes_may_use = 0;
3812         found->full = 0;
3813         found->max_extent_size = 0;
3814         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3815         found->chunk_alloc = 0;
3816         found->flush = 0;
3817         init_waitqueue_head(&found->wait);
3818         INIT_LIST_HEAD(&found->ro_bgs);
3819
3820         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3821                                     info->space_info_kobj, "%s",
3822                                     alloc_name(found->flags));
3823         if (ret) {
3824                 kfree(found);
3825                 return ret;
3826         }
3827
3828         *space_info = found;
3829         list_add_rcu(&found->list, &info->space_info);
3830         if (flags & BTRFS_BLOCK_GROUP_DATA)
3831                 info->data_sinfo = found;
3832
3833         return ret;
3834 }
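
/*
 * Illustrative example of the factor logic in update_space_info(): adding a
 * 1GiB RAID1 block group (factor 2) bumps total_bytes by 1GiB but disk_total
 * by 2GiB, since every byte is mirrored on disk; single, RAID0, RAID5 and
 * RAID6 groups use factor 1 here.
 */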
3835
3836 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3837 {
3838         u64 extra_flags = chunk_to_extended(flags) &
3839                                 BTRFS_EXTENDED_PROFILE_MASK;
3840
3841         write_seqlock(&fs_info->profiles_lock);
3842         if (flags & BTRFS_BLOCK_GROUP_DATA)
3843                 fs_info->avail_data_alloc_bits |= extra_flags;
3844         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3845                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3846         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3847                 fs_info->avail_system_alloc_bits |= extra_flags;
3848         write_sequnlock(&fs_info->profiles_lock);
3849 }
3850
3851 /*
3852  * returns target flags in extended format or 0 if restripe for this
3853  * chunk_type is not in progress
3854  *
3855  * should be called with either volume_mutex or balance_lock held
3856  */
3857 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3858 {
3859         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3860         u64 target = 0;
3861
3862         if (!bctl)
3863                 return 0;
3864
3865         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3866             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3867                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3868         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3869                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3870                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3871         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3872                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3873                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3874         }
3875
3876         return target;
3877 }
3878
3879 /*
3880  * @flags: available profiles in extended format (see ctree.h)
3881  *
3882  * Returns reduced profile in chunk format.  If profile changing is in
3883  * progress (either running or paused) picks the target profile (if it's
3884  * already available), otherwise falls back to plain reducing.
3885  */
3886 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3887 {
3888         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3889         u64 target;
3890         u64 raid_type;
3891         u64 allowed = 0;
3892
3893         /*
3894          * see if restripe for this chunk_type is in progress, if so
3895          * try to reduce to the target profile
3896          */
3897         spin_lock(&root->fs_info->balance_lock);
3898         target = get_restripe_target(root->fs_info, flags);
3899         if (target) {
3900                 /* pick target profile only if it's already available */
3901                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3902                         spin_unlock(&root->fs_info->balance_lock);
3903                         return extended_to_chunk(target);
3904                 }
3905         }
3906         spin_unlock(&root->fs_info->balance_lock);
3907
3908         /* First, mask out the RAID levels which aren't possible */
3909         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3910                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3911                         allowed |= btrfs_raid_group[raid_type];
3912         }
3913         allowed &= flags;
3914
3915         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3916                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3917         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3918                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3919         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3920                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3921         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3922                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3923         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3924                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3925
3926         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3927
3928         return extended_to_chunk(flags | allowed);
3929 }
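
/*
 * Illustrative example (assumed values): with 2 rw devices and no restripe
 * target, RAID6 and RAID10 are masked out by their devs_min, so available
 * profile bits of RAID1 | RAID0 reduce to RAID1, the highest remaining
 * profile in the preference order above.
 */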
3930
3931 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3932 {
3933         unsigned seq;
3934         u64 flags;
3935
3936         do {
3937                 flags = orig_flags;
3938                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3939
3940                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3941                         flags |= root->fs_info->avail_data_alloc_bits;
3942                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3943                         flags |= root->fs_info->avail_system_alloc_bits;
3944                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3945                         flags |= root->fs_info->avail_metadata_alloc_bits;
3946         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3947
3948         return btrfs_reduce_alloc_profile(root, flags);
3949 }
3950
3951 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3952 {
3953         u64 flags;
3954         u64 ret;
3955
3956         if (data)
3957                 flags = BTRFS_BLOCK_GROUP_DATA;
3958         else if (root == root->fs_info->chunk_root)
3959                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3960         else
3961                 flags = BTRFS_BLOCK_GROUP_METADATA;
3962
3963         ret = get_alloc_profile(root, flags);
3964         return ret;
3965 }
3966
3967 /*
3968  * This will check the space that the inode allocates from to make sure we have
3969  * enough space for bytes.
3970  */
3971 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3972 {
3973         struct btrfs_space_info *data_sinfo;
3974         struct btrfs_root *root = BTRFS_I(inode)->root;
3975         struct btrfs_fs_info *fs_info = root->fs_info;
3976         u64 used;
3977         int ret = 0;
3978         int need_commit = 2;
3979         int have_pinned_space;
3980
3981         /* make sure bytes are sectorsize aligned */
3982         bytes = ALIGN(bytes, root->sectorsize);
3983
3984         if (btrfs_is_free_space_inode(inode)) {
3985                 need_commit = 0;
3986                 ASSERT(current->journal_info);
3987         }
3988
3989         data_sinfo = fs_info->data_sinfo;
3990         if (!data_sinfo)
3991                 goto alloc;
3992
3993 again:
3994         /* make sure we have enough space to handle the data first */
3995         spin_lock(&data_sinfo->lock);
3996         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3997                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3998                 data_sinfo->bytes_may_use;
3999
4000         if (used + bytes > data_sinfo->total_bytes) {
4001                 struct btrfs_trans_handle *trans;
4002
4003                 /*
4004                  * if we don't have enough free bytes in this space then we need
4005                  * to alloc a new chunk.
4006                  */
4007                 if (!data_sinfo->full) {
4008                         u64 alloc_target;
4009
4010                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4011                         spin_unlock(&data_sinfo->lock);
4012 alloc:
4013                         alloc_target = btrfs_get_alloc_profile(root, 1);
4014                         /*
4015                          * It is ugly that we don't call nolock join
4016                          * transaction for the free space inode case here.
4017                          * But it is safe because we only do the data space
4018                          * reservation for the free space cache in the
4019                          * transaction context; the common join transaction
4020                          * just increases the counter of the current transaction
4021                          * handle and doesn't try to acquire the trans_lock of
4022                          * the fs.
4023                          */
4024                         trans = btrfs_join_transaction(root);
4025                         if (IS_ERR(trans))
4026                                 return PTR_ERR(trans);
4027
4028                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4029                                              alloc_target,
4030                                              CHUNK_ALLOC_NO_FORCE);
4031                         btrfs_end_transaction(trans, root);
4032                         if (ret < 0) {
4033                                 if (ret != -ENOSPC)
4034                                         return ret;
4035                                 else {
4036                                         have_pinned_space = 1;
4037                                         goto commit_trans;
4038                                 }
4039                         }
4040
4041                         if (!data_sinfo)
4042                                 data_sinfo = fs_info->data_sinfo;
4043
4044                         goto again;
4045                 }
4046
4047                 /*
4048                  * If we don't have enough pinned space to satisfy this
4049                  * allocation, and no chunk was removed in the current
4050                  * transaction, don't bother committing the transaction.
4051                  */
4052                 have_pinned_space = percpu_counter_compare(
4053                         &data_sinfo->total_bytes_pinned,
4054                         used + bytes - data_sinfo->total_bytes);
4055                 spin_unlock(&data_sinfo->lock);
4056
4057                 /* commit the current transaction and try again */
4058 commit_trans:
4059                 if (need_commit &&
4060                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4061                         need_commit--;
4062
4063                         if (need_commit > 0)
4064                                 btrfs_wait_ordered_roots(fs_info, -1);
4065
4066                         trans = btrfs_join_transaction(root);
4067                         if (IS_ERR(trans))
4068                                 return PTR_ERR(trans);
4069                         if (have_pinned_space >= 0 ||
4070                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4071                                      &trans->transaction->flags) ||
4072                             need_commit > 0) {
4073                                 ret = btrfs_commit_transaction(trans, root);
4074                                 if (ret)
4075                                         return ret;
4076                                 /*
4077                                  * make sure that all running delayed iput are
4078                                  * done
4079                                  */
4080                                 down_write(&root->fs_info->delayed_iput_sem);
4081                                 up_write(&root->fs_info->delayed_iput_sem);
4082                                 goto again;
4083                         } else {
4084                                 btrfs_end_transaction(trans, root);
4085                         }
4086                 }
4087
4088                 trace_btrfs_space_reservation(root->fs_info,
4089                                               "space_info:enospc",
4090                                               data_sinfo->flags, bytes, 1);
4091                 return -ENOSPC;
4092         }
4093         ret = btrfs_qgroup_reserve(root, write_bytes);
4094         if (ret)
4095                 goto out;
4096         data_sinfo->bytes_may_use += bytes;
4097         trace_btrfs_space_reservation(root->fs_info, "space_info",
4098                                       data_sinfo->flags, bytes, 1);
4099 out:
4100         spin_unlock(&data_sinfo->lock);
4101
4102         return ret;
4103 }
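
/*
 * Illustrative sketch (an assumption, not from the original source): a
 * write-path caller is expected to pair the check with a release on
 * failure, along these lines:
 *
 *	ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode);	// hypothetical helper
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, num_bytes);
 */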
4104
4105 /*
4106  * Called if we need to clear a data reservation for this inode.
4107  */
4108 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4109 {
4110         struct btrfs_root *root = BTRFS_I(inode)->root;
4111         struct btrfs_space_info *data_sinfo;
4112
4113         /* make sure bytes are sectorsize aligned */
4114         bytes = ALIGN(bytes, root->sectorsize);
4115
4116         data_sinfo = root->fs_info->data_sinfo;
4117         spin_lock(&data_sinfo->lock);
4118         WARN_ON(data_sinfo->bytes_may_use < bytes);
4119         data_sinfo->bytes_may_use -= bytes;
4120         trace_btrfs_space_reservation(root->fs_info, "space_info",
4121                                       data_sinfo->flags, bytes, 0);
4122         spin_unlock(&data_sinfo->lock);
4123 }
4124
4125 static void force_metadata_allocation(struct btrfs_fs_info *info)
4126 {
4127         struct list_head *head = &info->space_info;
4128         struct btrfs_space_info *found;
4129
4130         rcu_read_lock();
4131         list_for_each_entry_rcu(found, head, list) {
4132                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4133                         found->force_alloc = CHUNK_ALLOC_FORCE;
4134         }
4135         rcu_read_unlock();
4136 }
4137
4138 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4139 {
4140         return (global->size << 1);
4141 }
4142
4143 static int should_alloc_chunk(struct btrfs_root *root,
4144                               struct btrfs_space_info *sinfo, int force)
4145 {
4146         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4147         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4148         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4149         u64 thresh;
4150
4151         if (force == CHUNK_ALLOC_FORCE)
4152                 return 1;
4153
4154         /*
4155          * We need to take into account the global rsv because for all intents
4156          * and purposes it's used space.  Don't worry about locking the
4157          * global_rsv, it doesn't change except when the transaction commits.
4158          */
4159         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4160                 num_allocated += calc_global_rsv_need_space(global_rsv);
4161
4162         /*
4163          * in limited mode, we want to have some free space up to
4164          * about 1% of the FS size.
4165          */
4166         if (force == CHUNK_ALLOC_LIMITED) {
4167                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4168                 thresh = max_t(u64, 64 * 1024 * 1024,
4169                                div_factor_fine(thresh, 1));
4170
4171                 if (num_bytes - num_allocated < thresh)
4172                         return 1;
4173         }
4174
4175         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4176                 return 0;
4177         return 1;
4178 }
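
/*
 * Worked example (illustrative only): on a 1TiB filesystem in
 * CHUNK_ALLOC_LIMITED mode, thresh = max(64MiB, 1% of 1TiB) ~= 10GiB, so a
 * chunk is allocated whenever less than ~10GiB of this space info remains
 * unallocated.  Otherwise allocation only proceeds once num_allocated plus
 * a 2MiB slack reaches ~80% of num_bytes (the div_factor(num_bytes, 8)
 * check).
 */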
4179
4180 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4181 {
4182         u64 num_dev;
4183
4184         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4185                     BTRFS_BLOCK_GROUP_RAID0 |
4186                     BTRFS_BLOCK_GROUP_RAID5 |
4187                     BTRFS_BLOCK_GROUP_RAID6))
4188                 num_dev = root->fs_info->fs_devices->rw_devices;
4189         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4190                 num_dev = 2;
4191         else
4192                 num_dev = 1;    /* DUP or single */
4193
4194         return num_dev;
4195 }
4196
4197 /*
4198  * Reserve space in the system space_info for the metadata updates needed
4199  * when allocating or removing a chunk: updating num_devs device items and
4200  * adding or removing one chunk item.
4201  */
4202 void check_system_chunk(struct btrfs_trans_handle *trans,
4203                         struct btrfs_root *root,
4204                         u64 type)
4205 {
4206         struct btrfs_space_info *info;
4207         u64 left;
4208         u64 thresh;
4209         int ret = 0;
4210         u64 num_devs;
4211
4212         /*
4213          * Needed because we can end up allocating a system chunk and need
4214          * an atomic, race-free space reservation in the chunk block reserve.
4215          */
4216         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4217
4218         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4219         spin_lock(&info->lock);
4220         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4221                 info->bytes_reserved - info->bytes_readonly -
4222                 info->bytes_may_use;
4223         spin_unlock(&info->lock);
4224
4225         num_devs = get_profile_num_devs(root, type);
4226
4227         /* num_devs device items to update and 1 chunk item to add or remove */
4228         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4229                 btrfs_calc_trans_metadata_size(root, 1);
4230
4231         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4232                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4233                         left, thresh, type);
4234                 dump_space_info(info, 0, 0);
4235         }
4236
4237         if (left < thresh) {
4238                 u64 flags;
4239
4240                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4241                 /*
4242                  * Ignore failure to create system chunk. We might end up not
4243                  * needing it, as we might not need to COW all nodes/leafs from
4244                  * the paths we visit in the chunk tree (they were already COWed
4245                  * or created in the current transaction for example).
4246                  */
4247                 ret = btrfs_alloc_chunk(trans, root, flags);
4248         }
4249
4250         if (!ret) {
4251                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4252                                           &root->fs_info->chunk_block_rsv,
4253                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4254                 if (!ret)
4255                         trans->chunk_bytes_reserved += thresh;
4256         }
4257 }
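
/*
 * Worked example (illustrative only): for a RAID1 chunk, num_devs = 2, so
 * the threshold covers updating two device items plus adding or removing
 * one chunk item:
 *
 *	thresh = btrfs_calc_trunc_metadata_size(root, 2) +
 *		 btrfs_calc_trans_metadata_size(root, 1);
 *
 * Only when the SYSTEM space info has less than that left do we try to
 * allocate a new system chunk before reserving from chunk_block_rsv.
 */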
4258
4259 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4260                           struct btrfs_root *extent_root, u64 flags, int force)
4261 {
4262         struct btrfs_space_info *space_info;
4263         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4264         int wait_for_alloc = 0;
4265         int ret = 0;
4266
4267         /* Don't re-enter if we're already allocating a chunk */
4268         if (trans->allocating_chunk)
4269                 return -ENOSPC;
4270
4271         space_info = __find_space_info(extent_root->fs_info, flags);
4272         if (!space_info) {
4273                 ret = update_space_info(extent_root->fs_info, flags,
4274                                         0, 0, &space_info);
4275                 BUG_ON(ret); /* -ENOMEM */
4276         }
4277         BUG_ON(!space_info); /* Logic error */
4278
4279 again:
4280         spin_lock(&space_info->lock);
4281         if (force < space_info->force_alloc)
4282                 force = space_info->force_alloc;
4283         if (space_info->full) {
4284                 if (should_alloc_chunk(extent_root, space_info, force))
4285                         ret = -ENOSPC;
4286                 else
4287                         ret = 0;
4288                 spin_unlock(&space_info->lock);
4289                 return ret;
4290         }
4291
4292         if (!should_alloc_chunk(extent_root, space_info, force)) {
4293                 spin_unlock(&space_info->lock);
4294                 return 0;
4295         } else if (space_info->chunk_alloc) {
4296                 wait_for_alloc = 1;
4297         } else {
4298                 space_info->chunk_alloc = 1;
4299         }
4300
4301         spin_unlock(&space_info->lock);
4302
4303         mutex_lock(&fs_info->chunk_mutex);
4304
4305         /*
4306          * The chunk_mutex is held throughout the entirety of a chunk
4307          * allocation, so once we've acquired the chunk_mutex we know that the
4308          * other guy is done and we need to recheck and see if we should
4309          * allocate.
4310          */
4311         if (wait_for_alloc) {
4312                 mutex_unlock(&fs_info->chunk_mutex);
4313                 wait_for_alloc = 0;
4314                 goto again;
4315         }
4316
4317         trans->allocating_chunk = true;
4318
4319         /*
4320          * If we have mixed data/metadata chunks we want to make sure we keep
4321          * allocating mixed chunks instead of individual chunks.
4322          */
4323         if (btrfs_mixed_space_info(space_info))
4324                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4325
4326         /*
4327          * if we're doing a data chunk, go ahead and make sure that
4328          * we keep a reasonable number of metadata chunks allocated in the
4329          * FS as well.
4330          */
4331         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4332                 fs_info->data_chunk_allocations++;
4333                 if (!(fs_info->data_chunk_allocations %
4334                       fs_info->metadata_ratio))
4335                         force_metadata_allocation(fs_info);
4336         }
4337
4338         /*
4339          * Check if we have enough space in the SYSTEM chunk, because we
4340          * may need to update device items.
4341          */
4342         check_system_chunk(trans, extent_root, flags);
4343
4344         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4345         trans->allocating_chunk = false;
4346
4347         spin_lock(&space_info->lock);
4348         if (ret < 0 && ret != -ENOSPC)
4349                 goto out;
4350         if (ret)
4351                 space_info->full = 1;
4352         else
4353                 ret = 1;
4354
4355         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4356 out:
4357         space_info->chunk_alloc = 0;
4358         spin_unlock(&space_info->lock);
4359         mutex_unlock(&fs_info->chunk_mutex);
4360         /*
4361          * When we allocate a new chunk we reserve space in the chunk block
4362          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4363          * add new nodes/leafs to it if we end up needing to do it when
4364          * inserting the chunk item and updating device items as part of the
4365          * second phase of chunk allocation, performed by
4366          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4367          * large number of new block groups to create in our transaction
4368          * handle's new_bgs list to avoid exhausting the chunk block reserve
4369          * in extreme cases - like having a single transaction create many new
4370          * block groups when starting to write out the free space caches of all
4371          * the block groups that were made dirty during the lifetime of the
4372          * transaction.
4373          */
4374         if (trans->can_flush_pending_bgs &&
4375             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4376                 btrfs_create_pending_block_groups(trans, trans->root);
4377                 btrfs_trans_release_chunk_metadata(trans);
4378         }
4379         return ret;
4380 }
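
/*
 * Illustrative sketch (an assumption, not from the original source):
 * callers typically drive do_chunk_alloc() from a joined transaction, as
 * the data reservation path above does:
 *
 *	trans = btrfs_join_transaction(root);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 *			     btrfs_get_alloc_profile(root, 1),
 *			     CHUNK_ALLOC_NO_FORCE);
 *	btrfs_end_transaction(trans, root);
 */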
4381
4382 static int can_overcommit(struct btrfs_root *root,
4383                           struct btrfs_space_info *space_info, u64 bytes,
4384                           enum btrfs_reserve_flush_enum flush)
4385 {
4386         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4387         u64 profile = btrfs_get_alloc_profile(root, 0);
4388         u64 space_size;
4389         u64 avail;
4390         u64 used;
4391
4392         used = space_info->bytes_used + space_info->bytes_reserved +
4393                 space_info->bytes_pinned + space_info->bytes_readonly;
4394
4395         /*
4396          * We only want to allow over committing if we have lots of actual space
4397          * free, but if we don't have enough space to handle the global reserve
4398          * space then we could end up having a real enospc problem when trying
4399          * to allocate a chunk or some other such important allocation.
4400          */
4401         spin_lock(&global_rsv->lock);
4402         space_size = calc_global_rsv_need_space(global_rsv);
4403         spin_unlock(&global_rsv->lock);
4404         if (used + space_size >= space_info->total_bytes)
4405                 return 0;
4406
4407         used += space_info->bytes_may_use;
4408
4409         spin_lock(&root->fs_info->free_chunk_lock);
4410         avail = root->fs_info->free_chunk_space;
4411         spin_unlock(&root->fs_info->free_chunk_lock);
4412
4413         /*
4414          * If we have dup, raid1 or raid10 then only half of the free
4415          * space is actually usable.  For raid56, the space info used
4416          * doesn't include the parity drive, so we don't have to
4417          * change the math.
4418          */
4419         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4420                        BTRFS_BLOCK_GROUP_RAID1 |
4421                        BTRFS_BLOCK_GROUP_RAID10))
4422                 avail >>= 1;
4423
4424         /*
4425          * If we aren't flushing all things, let us overcommit up to
4426          * 1/2 of the space. If we can flush, don't let us overcommit
4427          * too much; let it overcommit up to 1/8 of the space.
4428          */
4429         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4430                 avail >>= 3;
4431         else
4432                 avail >>= 1;
4433
4434         if (used + bytes < space_info->total_bytes + avail)
4435                 return 1;
4436         return 0;
4437 }
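
/*
 * Worked example (illustrative only): with 10GiB of free chunk space on a
 * RAID1 metadata profile and flush == BTRFS_RESERVE_FLUSH_ALL, avail is
 * first halved to 5GiB (mirrored writes) and then reduced to
 * 5GiB >> 3 = 640MiB, so the overcommit succeeds only while used + bytes
 * stays below total_bytes + 640MiB.
 */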
4438
4439 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4440                                          unsigned long nr_pages, int nr_items)
4441 {
4442         struct super_block *sb = root->fs_info->sb;
4443
4444         if (down_read_trylock(&sb->s_umount)) {
4445                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4446                 up_read(&sb->s_umount);
4447         } else {
4448                 /*
4449                  * We needn't worry about the filesystem going from r/w to
4450                  * r/o even though we don't acquire the ->s_umount mutex,
4451                  * because the filesystem should guarantee that the delalloc
4452                  * inodes list is empty once it is read-only (all dirty
4453                  * pages have been written to disk).
4454                  */
4455                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4456                 if (!current->journal_info)
4457                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4458         }
4459 }
4460
4461 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4462 {
4463         u64 bytes;
4464         int nr;
4465
4466         bytes = btrfs_calc_trans_metadata_size(root, 1);
4467         nr = (int)div64_u64(to_reclaim, bytes);
4468         if (!nr)
4469                 nr = 1;
4470         return nr;
4471 }
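
/*
 * Illustrative note (not part of the original source): this simply divides
 * the bytes we want back by the metadata cost of one item, so asking for
 * less than one item's worth still flushes one item.  shrink_delalloc()
 * below converts items back into a byte target via
 * items * EXTENT_SIZE_PER_ITEM (256KiB per item).
 */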
4472
4473 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4474
4475 /*
4476  * shrink metadata reservation for delalloc
4477  */
4478 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4479                             bool wait_ordered)
4480 {
4481         struct btrfs_block_rsv *block_rsv;
4482         struct btrfs_space_info *space_info;
4483         struct btrfs_trans_handle *trans;
4484         u64 delalloc_bytes;
4485         u64 max_reclaim;
4486         long time_left;
4487         unsigned long nr_pages;
4488         int loops;
4489         int items;
4490         enum btrfs_reserve_flush_enum flush;
4491
4492         /* Calculate the number of items we need to flush for this reservation */
4493         items = calc_reclaim_items_nr(root, to_reclaim);
4494         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4495
4496         trans = (struct btrfs_trans_handle *)current->journal_info;
4497         block_rsv = &root->fs_info->delalloc_block_rsv;
4498         space_info = block_rsv->space_info;
4499
4500         delalloc_bytes = percpu_counter_sum_positive(
4501                                                 &root->fs_info->delalloc_bytes);
4502         if (delalloc_bytes == 0) {
4503                 if (trans)
4504                         return;
4505                 if (wait_ordered)
4506                         btrfs_wait_ordered_roots(root->fs_info, items);
4507                 return;
4508         }
4509
4510         loops = 0;
4511         while (delalloc_bytes && loops < 3) {
4512                 max_reclaim = min(delalloc_bytes, to_reclaim);
4513                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4514                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4515                 /*
4516                  * We need to wait for the async pages to actually start before
4517                  * we do anything.
4518                  */
4519                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4520                 if (!max_reclaim)
4521                         goto skip_async;
4522
4523                 if (max_reclaim <= nr_pages)
4524                         max_reclaim = 0;
4525                 else
4526                         max_reclaim -= nr_pages;
4527
4528                 wait_event(root->fs_info->async_submit_wait,
4529                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4530                            (int)max_reclaim);
4531 skip_async:
4532                 if (!trans)
4533                         flush = BTRFS_RESERVE_FLUSH_ALL;
4534                 else
4535                         flush = BTRFS_RESERVE_NO_FLUSH;
4536                 spin_lock(&space_info->lock);
4537                 if (can_overcommit(root, space_info, orig, flush)) {
4538                         spin_unlock(&space_info->lock);
4539                         break;
4540                 }
4541                 spin_unlock(&space_info->lock);
4542
4543                 loops++;
4544                 if (wait_ordered && !trans) {
4545                         btrfs_wait_ordered_roots(root->fs_info, items);
4546                 } else {
4547                         time_left = schedule_timeout_killable(1);
4548                         if (time_left)
4549                                 break;
4550                 }
4551                 delalloc_bytes = percpu_counter_sum_positive(
4552                                                 &root->fs_info->delalloc_bytes);
4553         }
4554 }
4555
4556 /**
4557  * may_commit_transaction - possibly commit the transaction if it's ok to
4558  * @root - the root we're allocating for
4559  * @bytes - the number of bytes we want to reserve
4560  * @force - force the commit
4561  *
4562  * This will check to make sure that committing the transaction will actually
4563  * get us somewhere and then commit the transaction if it does.  Otherwise it
4564  * will return -ENOSPC.
4565  */
4566 static int may_commit_transaction(struct btrfs_root *root,
4567                                   struct btrfs_space_info *space_info,
4568                                   u64 bytes, int force)
4569 {
4570         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4571         struct btrfs_trans_handle *trans;
4572
4573         trans = (struct btrfs_trans_handle *)current->journal_info;
4574         if (trans)
4575                 return -EAGAIN;
4576
4577         if (force)
4578                 goto commit;
4579
4580         /* See if there is enough pinned space to make this reservation */
4581         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4582                                    bytes) >= 0)
4583                 goto commit;
4584
4585         /*
4586          * See if there is some space in the delayed insertion reservation for
4587          * this reservation.
4588          */
4589         if (space_info != delayed_rsv->space_info)
4590                 return -ENOSPC;
4591
4592         spin_lock(&delayed_rsv->lock);
4593         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4594                                    bytes - delayed_rsv->size) >= 0) {
4595                 spin_unlock(&delayed_rsv->lock);
4596                 return -ENOSPC;
4597         }
4598         spin_unlock(&delayed_rsv->lock);
4599
4600 commit:
4601         trans = btrfs_join_transaction(root);
4602         if (IS_ERR(trans))
4603                 return -ENOSPC;
4604
4605         return btrfs_commit_transaction(trans, root);
4606 }
4607
4608 enum flush_state {
4609         FLUSH_DELAYED_ITEMS_NR  =       1,
4610         FLUSH_DELAYED_ITEMS     =       2,
4611         FLUSH_DELALLOC          =       3,
4612         FLUSH_DELALLOC_WAIT     =       4,
4613         ALLOC_CHUNK             =       5,
4614         COMMIT_TRANS            =       6,
4615 };
4616
4617 static int flush_space(struct btrfs_root *root,
4618                        struct btrfs_space_info *space_info, u64 num_bytes,
4619                        u64 orig_bytes, int state)
4620 {
4621         struct btrfs_trans_handle *trans;
4622         int nr;
4623         int ret = 0;
4624
4625         switch (state) {
4626         case FLUSH_DELAYED_ITEMS_NR:
4627         case FLUSH_DELAYED_ITEMS:
4628                 if (state == FLUSH_DELAYED_ITEMS_NR)
4629                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4630                 else
4631                         nr = -1;
4632
4633                 trans = btrfs_join_transaction(root);
4634                 if (IS_ERR(trans)) {
4635                         ret = PTR_ERR(trans);
4636                         break;
4637                 }
4638                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4639                 btrfs_end_transaction(trans, root);
4640                 break;
4641         case FLUSH_DELALLOC:
4642         case FLUSH_DELALLOC_WAIT:
4643                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4644                                 state == FLUSH_DELALLOC_WAIT);
4645                 break;
4646         case ALLOC_CHUNK:
4647                 trans = btrfs_join_transaction(root);
4648                 if (IS_ERR(trans)) {
4649                         ret = PTR_ERR(trans);
4650                         break;
4651                 }
4652                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4653                                      btrfs_get_alloc_profile(root, 0),
4654                                      CHUNK_ALLOC_NO_FORCE);
4655                 btrfs_end_transaction(trans, root);
4656                 if (ret == -ENOSPC)
4657                         ret = 0;
4658                 break;
4659         case COMMIT_TRANS:
4660                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4661                 break;
4662         default:
4663                 ret = -ENOSPC;
4664                 break;
4665         }
4666
4667         return ret;
4668 }
4669
4670 static inline u64
4671 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4672                                  struct btrfs_space_info *space_info)
4673 {
4674         u64 used;
4675         u64 expected;
4676         u64 to_reclaim;
4677
4678         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4679                                 16 * 1024 * 1024);
4680         spin_lock(&space_info->lock);
4681         if (can_overcommit(root, space_info, to_reclaim,
4682                            BTRFS_RESERVE_FLUSH_ALL)) {
4683                 to_reclaim = 0;
4684                 goto out;
4685         }
4686
4687         used = space_info->bytes_used + space_info->bytes_reserved +
4688                space_info->bytes_pinned + space_info->bytes_readonly +
4689                space_info->bytes_may_use;
4690         if (can_overcommit(root, space_info, 1024 * 1024,
4691                            BTRFS_RESERVE_FLUSH_ALL))
4692                 expected = div_factor_fine(space_info->total_bytes, 95);
4693         else
4694                 expected = div_factor_fine(space_info->total_bytes, 90);
4695
4696         if (used > expected)
4697                 to_reclaim = used - expected;
4698         else
4699                 to_reclaim = 0;
4700         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4701                                      space_info->bytes_reserved);
4702 out:
4703         spin_unlock(&space_info->lock);
4704
4705         return to_reclaim;
4706 }
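
/*
 * Worked example (illustrative only): on an 8-CPU box the initial target
 * is min(8 * 1MiB, 16MiB) = 8MiB.  If we can't overcommit by that much,
 * the target grows to everything used above 90% of total_bytes (95% if a
 * small 1MiB overcommit would still fit), clamped to what is actually
 * reclaimable (bytes_may_use + bytes_reserved).
 */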
4707
4708 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4709                                         struct btrfs_fs_info *fs_info, u64 used)
4710 {
4711         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4712
4713         /* If we're just plain full then async reclaim just slows us down. */
4714         if (space_info->bytes_used >= thresh)
4715                 return 0;
4716
4717         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4718                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4719 }
4720
4721 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4722                                        struct btrfs_fs_info *fs_info,
4723                                        int flush_state)
4724 {
4725         u64 used;
4726
4727         spin_lock(&space_info->lock);
4728         /*
4729          * We've run out of space and flush_space has not recovered any
4730          * free space, so don't bother doing async reclaim.
4731          */
4732         if (flush_state > COMMIT_TRANS && space_info->full) {
4733                 spin_unlock(&space_info->lock);
4734                 return 0;
4735         }
4736
4737         used = space_info->bytes_used + space_info->bytes_reserved +
4738                space_info->bytes_pinned + space_info->bytes_readonly +
4739                space_info->bytes_may_use;
4740         if (need_do_async_reclaim(space_info, fs_info, used)) {
4741                 spin_unlock(&space_info->lock);
4742                 return 1;
4743         }
4744         spin_unlock(&space_info->lock);
4745
4746         return 0;
4747 }
4748
4749 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4750 {
4751         struct btrfs_fs_info *fs_info;
4752         struct btrfs_space_info *space_info;
4753         u64 to_reclaim;
4754         int flush_state;
4755
4756         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4757         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4758
4759         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4760                                                       space_info);
4761         if (!to_reclaim)
4762                 return;
4763
4764         flush_state = FLUSH_DELAYED_ITEMS_NR;
4765         do {
4766                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4767                             to_reclaim, flush_state);
4768                 flush_state++;
4769                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4770                                                  flush_state))
4771                         return;
4772         } while (flush_state < COMMIT_TRANS);
4773 }
4774
4775 void btrfs_init_async_reclaim_work(struct work_struct *work)
4776 {
4777         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4778 }
4779
4780 /**
4781  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4782  * @root - the root we're allocating for
4783  * @block_rsv - the block_rsv we're allocating for
4784  * @orig_bytes - the number of bytes we want
4785  * @flush - whether or not we can flush to make our reservation
4786  *
4787  * This will reserve orig_bytes number of bytes from the space info associated
4788  * with the block_rsv.  If there is not enough space it will make an attempt to
4789  * flush out space to make room.  It will do this by flushing delalloc if
4790  * possible or committing the transaction.  If flush is 0 then no attempts to
4791  * regain reservations will be made and this will fail if there is not enough
4792  * space already.
4793  */
4794 static int reserve_metadata_bytes(struct btrfs_root *root,
4795                                   struct btrfs_block_rsv *block_rsv,
4796                                   u64 orig_bytes,
4797                                   enum btrfs_reserve_flush_enum flush)
4798 {
4799         struct btrfs_space_info *space_info = block_rsv->space_info;
4800         u64 used;
4801         u64 num_bytes = orig_bytes;
4802         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4803         int ret = 0;
4804         bool flushing = false;
4805
4806 again:
4807         ret = 0;
4808         spin_lock(&space_info->lock);
4809         /*
4810          * We only want to wait if somebody other than us is flushing and we
4811          * are actually allowed to flush all things.
4812          */
4813         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4814                space_info->flush) {
4815                 spin_unlock(&space_info->lock);
4816                 /*
4817                  * If we have a trans handle we can't wait because the flusher
4818                  * may have to commit the transaction, which would mean we would
4819                  * deadlock since we are waiting for the flusher to finish, but
4820                  * hold the current transaction open.
4821                  */
4822                 if (current->journal_info)
4823                         return -EAGAIN;
4824                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4825                 /* Must have been killed, return */
4826                 if (ret)
4827                         return -EINTR;
4828
4829                 spin_lock(&space_info->lock);
4830         }
4831
4832         ret = -ENOSPC;
4833         used = space_info->bytes_used + space_info->bytes_reserved +
4834                 space_info->bytes_pinned + space_info->bytes_readonly +
4835                 space_info->bytes_may_use;
4836
4837         /*
4838          * The idea here is that if we haven't already over-reserved the
4839          * space info then we can go ahead and save our reservation first
4840          * and then start flushing if we need to.  Otherwise, if we've
4841          * already overcommitted, let's start flushing stuff first and then
4842          * come back and try to make our reservation.
4843          */
4844         if (used <= space_info->total_bytes) {
4845                 if (used + orig_bytes <= space_info->total_bytes) {
4846                         space_info->bytes_may_use += orig_bytes;
4847                         trace_btrfs_space_reservation(root->fs_info,
4848                                 "space_info", space_info->flags, orig_bytes, 1);
4849                         ret = 0;
4850                 } else {
4851                         /*
4852                          * Ok, set num_bytes to orig_bytes since we aren't
4853                          * overcommitted; this way we only try to reclaim
4854                          * what we need.
4855                          */
4856                         num_bytes = orig_bytes;
4857                 }
4858         } else {
4859                 /*
4860                  * Ok we're over committed, set num_bytes to the overcommitted
4861                  * amount plus the amount of bytes that we need for this
4862                  * reservation.
4863                  */
4864                 num_bytes = used - space_info->total_bytes +
4865                         (orig_bytes * 2);
4866         }
4867
4868         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4869                 space_info->bytes_may_use += orig_bytes;
4870                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4871                                               space_info->flags, orig_bytes,
4872                                               1);
4873                 ret = 0;
4874         }
4875
4876         /*
4877          * Couldn't make our reservation, save our place so while we're trying
4878          * to reclaim space we can actually use it instead of somebody else
4879          * stealing it from us.
4880          *
4881          * We make the other tasks wait for the flush only when we can flush
4882          * all things.
4883          */
4884         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4885                 flushing = true;
4886                 space_info->flush = 1;
4887         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4888                 used += orig_bytes;
4889                 /*
4890                  * We will do the space reservation dance during log replay,
4891                  * which means we won't have fs_info->fs_root set, so don't do
4892                  * the async reclaim as we will panic.
4893                  */
4894                 if (!root->fs_info->log_root_recovering &&
4895                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4896                     !work_busy(&root->fs_info->async_reclaim_work))
4897                         queue_work(system_unbound_wq,
4898                                    &root->fs_info->async_reclaim_work);
4899         }
4900         spin_unlock(&space_info->lock);
4901
4902         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4903                 goto out;
4904
4905         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4906                           flush_state);
4907         flush_state++;
4908
4909         /*
4910          * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
4911          * could happen. So skip the delalloc flush.
4912          */
4913         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4914             (flush_state == FLUSH_DELALLOC ||
4915              flush_state == FLUSH_DELALLOC_WAIT))
4916                 flush_state = ALLOC_CHUNK;
4917
4918         if (!ret)
4919                 goto again;
4920         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4921                  flush_state < COMMIT_TRANS)
4922                 goto again;
4923         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4924                  flush_state <= COMMIT_TRANS)
4925                 goto again;
4926
4927 out:
4928         if (ret == -ENOSPC &&
4929             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4930                 struct btrfs_block_rsv *global_rsv =
4931                         &root->fs_info->global_block_rsv;
4932
4933                 if (block_rsv != global_rsv &&
4934                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4935                         ret = 0;
4936         }
4937         if (ret == -ENOSPC)
4938                 trace_btrfs_space_reservation(root->fs_info,
4939                                               "space_info:enospc",
4940                                               space_info->flags, orig_bytes, 1);
4941         if (flushing) {
4942                 spin_lock(&space_info->lock);
4943                 space_info->flush = 0;
4944                 wake_up_all(&space_info->wait);
4945                 spin_unlock(&space_info->lock);
4946         }
4947         return ret;
4948 }
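
/*
 * Illustrative note (not part of the original source): the retry loop
 * above walks the flush_state enum in order, so a BTRFS_RESERVE_FLUSH_ALL
 * caller escalates FLUSH_DELAYED_ITEMS_NR -> FLUSH_DELAYED_ITEMS ->
 * FLUSH_DELALLOC -> FLUSH_DELALLOC_WAIT -> ALLOC_CHUNK -> COMMIT_TRANS
 * before giving up, while BTRFS_RESERVE_FLUSH_LIMIT skips the delalloc
 * states and stops before COMMIT_TRANS.
 */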
4949
4950 static struct btrfs_block_rsv *get_block_rsv(
4951                                         const struct btrfs_trans_handle *trans,
4952                                         const struct btrfs_root *root)
4953 {
4954         struct btrfs_block_rsv *block_rsv = NULL;
4955
4956         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4957             (root == root->fs_info->csum_root && trans->adding_csums) ||
4958              (root == root->fs_info->uuid_root))
4959                 block_rsv = trans->block_rsv;
4960
4961         if (!block_rsv)
4962                 block_rsv = root->block_rsv;
4963
4964         if (!block_rsv)
4965                 block_rsv = &root->fs_info->empty_block_rsv;
4966
4967         return block_rsv;
4968 }
4969
4970 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4971                                u64 num_bytes)
4972 {
4973         int ret = -ENOSPC;
4974         spin_lock(&block_rsv->lock);
4975         if (block_rsv->reserved >= num_bytes) {
4976                 block_rsv->reserved -= num_bytes;
4977                 if (block_rsv->reserved < block_rsv->size)
4978                         block_rsv->full = 0;
4979                 ret = 0;
4980         }
4981         spin_unlock(&block_rsv->lock);
4982         return ret;
4983 }
4984
4985 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4986                                 u64 num_bytes, int update_size)
4987 {
4988         spin_lock(&block_rsv->lock);
4989         block_rsv->reserved += num_bytes;
4990         if (update_size)
4991                 block_rsv->size += num_bytes;
4992         else if (block_rsv->reserved >= block_rsv->size)
4993                 block_rsv->full = 1;
4994         spin_unlock(&block_rsv->lock);
4995 }
4996
4997 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4998                              struct btrfs_block_rsv *dest, u64 num_bytes,
4999                              int min_factor)
5000 {
5001         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5002         u64 min_bytes;
5003
5004         if (global_rsv->space_info != dest->space_info)
5005                 return -ENOSPC;
5006
5007         spin_lock(&global_rsv->lock);
5008         min_bytes = div_factor(global_rsv->size, min_factor);
5009         if (global_rsv->reserved < min_bytes + num_bytes) {
5010                 spin_unlock(&global_rsv->lock);
5011                 return -ENOSPC;
5012         }
5013         global_rsv->reserved -= num_bytes;
5014         if (global_rsv->reserved < global_rsv->size)
5015                 global_rsv->full = 0;
5016         spin_unlock(&global_rsv->lock);
5017
5018         block_rsv_add_bytes(dest, num_bytes, 1);
5019         return 0;
5020 }
5021
5022 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5023                                     struct btrfs_block_rsv *block_rsv,
5024                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5025 {
5026         struct btrfs_space_info *space_info = block_rsv->space_info;
5027
5028         spin_lock(&block_rsv->lock);
5029         if (num_bytes == (u64)-1)
5030                 num_bytes = block_rsv->size;
5031         block_rsv->size -= num_bytes;
5032         if (block_rsv->reserved >= block_rsv->size) {
5033                 num_bytes = block_rsv->reserved - block_rsv->size;
5034                 block_rsv->reserved = block_rsv->size;
5035                 block_rsv->full = 1;
5036         } else {
5037                 num_bytes = 0;
5038         }
5039         spin_unlock(&block_rsv->lock);
5040
5041         if (num_bytes > 0) {
5042                 if (dest) {
5043                         spin_lock(&dest->lock);
5044                         if (!dest->full) {
5045                                 u64 bytes_to_add;
5046
5047                                 bytes_to_add = dest->size - dest->reserved;
5048                                 bytes_to_add = min(num_bytes, bytes_to_add);
5049                                 dest->reserved += bytes_to_add;
5050                                 if (dest->reserved >= dest->size)
5051                                         dest->full = 1;
5052                                 num_bytes -= bytes_to_add;
5053                         }
5054                         spin_unlock(&dest->lock);
5055                 }
5056                 if (num_bytes) {
5057                         spin_lock(&space_info->lock);
5058                         space_info->bytes_may_use -= num_bytes;
5059                         trace_btrfs_space_reservation(fs_info, "space_info",
5060                                         space_info->flags, num_bytes, 0);
5061                         spin_unlock(&space_info->lock);
5062                 }
5063         }
5064 }
5065
5066 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5067                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5068 {
5069         int ret;
5070
5071         ret = block_rsv_use_bytes(src, num_bytes);
5072         if (ret)
5073                 return ret;
5074
5075         block_rsv_add_bytes(dst, num_bytes, 1);
5076         return 0;
5077 }
5078
5079 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5080 {
5081         memset(rsv, 0, sizeof(*rsv));
5082         spin_lock_init(&rsv->lock);
5083         rsv->type = type;
5084 }
5085
5086 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5087                                               unsigned short type)
5088 {
5089         struct btrfs_block_rsv *block_rsv;
5090         struct btrfs_fs_info *fs_info = root->fs_info;
5091
5092         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5093         if (!block_rsv)
5094                 return NULL;
5095
5096         btrfs_init_block_rsv(block_rsv, type);
5097         block_rsv->space_info = __find_space_info(fs_info,
5098                                                   BTRFS_BLOCK_GROUP_METADATA);
5099         return block_rsv;
5100 }
5101
5102 void btrfs_free_block_rsv(struct btrfs_root *root,
5103                           struct btrfs_block_rsv *rsv)
5104 {
5105         if (!rsv)
5106                 return;
5107         btrfs_block_rsv_release(root, rsv, (u64)-1);
5108         kfree(rsv);
5109 }
5110
5111 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5112 {
5113         kfree(rsv);
5114 }
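
/*
 * Illustrative sketch (an assumption, not from the original source): a
 * typical private reservation lifecycle built from the helpers above,
 * assuming the BTRFS_BLOCK_RSV_TEMP type:
 *
 *	struct btrfs_block_rsv *rsv;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	...use the reserved space...
 *	btrfs_free_block_rsv(root, rsv);	// releases any remainder
 */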
5115
5116 int btrfs_block_rsv_add(struct btrfs_root *root,
5117                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5118                         enum btrfs_reserve_flush_enum flush)
5119 {
5120         int ret;
5121
5122         if (num_bytes == 0)
5123                 return 0;
5124
5125         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5126         if (!ret) {
5127                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5128                 return 0;
5129         }
5130
5131         return ret;
5132 }
5133
5134 int btrfs_block_rsv_check(struct btrfs_root *root,
5135                           struct btrfs_block_rsv *block_rsv, int min_factor)
5136 {
5137         u64 num_bytes = 0;
5138         int ret = -ENOSPC;
5139
5140         if (!block_rsv)
5141                 return 0;
5142
5143         spin_lock(&block_rsv->lock);
5144         num_bytes = div_factor(block_rsv->size, min_factor);
5145         if (block_rsv->reserved >= num_bytes)
5146                 ret = 0;
5147         spin_unlock(&block_rsv->lock);
5148
5149         return ret;
5150 }
5151
5152 int btrfs_block_rsv_refill(struct btrfs_root *root,
5153                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5154                            enum btrfs_reserve_flush_enum flush)
5155 {
5156         u64 num_bytes = 0;
5157         int ret = -ENOSPC;
5158
5159         if (!block_rsv)
5160                 return 0;
5161
5162         spin_lock(&block_rsv->lock);
5163         num_bytes = min_reserved;
5164         if (block_rsv->reserved >= num_bytes)
5165                 ret = 0;
5166         else
5167                 num_bytes -= block_rsv->reserved;
5168         spin_unlock(&block_rsv->lock);
5169
5170         if (!ret)
5171                 return 0;
5172
5173         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5174         if (!ret) {
5175                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5176                 return 0;
5177         }
5178
5179         return ret;
5180 }
5181
5182 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5183                             struct btrfs_block_rsv *dst_rsv,
5184                             u64 num_bytes)
5185 {
5186         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5187 }
5188
5189 void btrfs_block_rsv_release(struct btrfs_root *root,
5190                              struct btrfs_block_rsv *block_rsv,
5191                              u64 num_bytes)
5192 {
5193         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5194         if (global_rsv == block_rsv ||
5195             block_rsv->space_info != global_rsv->space_info)
5196                 global_rsv = NULL;
5197         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5198                                 num_bytes);
5199 }
5200
5201 /*
5202  * helper to calculate size of global block reservation.
5203  * the desired value is sum of space used by extent tree,
5204  * checksum tree and root tree
5205  */
5206 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5207 {
5208         struct btrfs_space_info *sinfo;
5209         u64 num_bytes;
5210         u64 meta_used;
5211         u64 data_used;
5212         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5213
5214         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5215         spin_lock(&sinfo->lock);
5216         data_used = sinfo->bytes_used;
5217         spin_unlock(&sinfo->lock);
5218
5219         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5220         spin_lock(&sinfo->lock);
5221         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5222                 data_used = 0;
5223         meta_used = sinfo->bytes_used;
5224         spin_unlock(&sinfo->lock);
5225
5226         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5227                     csum_size * 2;
5228         num_bytes += div_u64(data_used + meta_used, 50);
5229
5230         if (num_bytes * 3 > meta_used)
5231                 num_bytes = div_u64(meta_used, 3);
5232
5233         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5234 }
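
/*
 * Worked example (illustrative only): with 100GiB of data used, 1GiB of
 * metadata used, 4KiB blocks and 4-byte csums, the csum term is
 * (100GiB / 4KiB) * 4 * 2 = 200MiB, plus (data + metadata) / 50 ~= 2GiB.
 * Since that exceeds a third of the metadata in use, the result is
 * clamped to meta_used / 3 ~= 341MiB and aligned up to nodesize << 10.
 */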
5235
5236 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5237 {
5238         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5239         struct btrfs_space_info *sinfo = block_rsv->space_info;
5240         u64 num_bytes;
5241
5242         num_bytes = calc_global_metadata_size(fs_info);
5243
5244         spin_lock(&sinfo->lock);
5245         spin_lock(&block_rsv->lock);
5246
5247         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5248
5249         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5250                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5251                     sinfo->bytes_may_use;
5252
5253         if (sinfo->total_bytes > num_bytes) {
5254                 num_bytes = sinfo->total_bytes - num_bytes;
5255                 block_rsv->reserved += num_bytes;
5256                 sinfo->bytes_may_use += num_bytes;
5257                 trace_btrfs_space_reservation(fs_info, "space_info",
5258                                       sinfo->flags, num_bytes, 1);
5259         }
5260
5261         if (block_rsv->reserved >= block_rsv->size) {
5262                 num_bytes = block_rsv->reserved - block_rsv->size;
5263                 sinfo->bytes_may_use -= num_bytes;
5264                 trace_btrfs_space_reservation(fs_info, "space_info",
5265                                       sinfo->flags, num_bytes, 0);
5266                 block_rsv->reserved = block_rsv->size;
5267                 block_rsv->full = 1;
5268         }
5269
5270         spin_unlock(&block_rsv->lock);
5271         spin_unlock(&sinfo->lock);
5272 }
5273
5274 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5275 {
5276         struct btrfs_space_info *space_info;
5277
5278         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5279         fs_info->chunk_block_rsv.space_info = space_info;
5280
5281         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5282         fs_info->global_block_rsv.space_info = space_info;
5283         fs_info->delalloc_block_rsv.space_info = space_info;
5284         fs_info->trans_block_rsv.space_info = space_info;
5285         fs_info->empty_block_rsv.space_info = space_info;
5286         fs_info->delayed_block_rsv.space_info = space_info;
5287
5288         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5289         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5290         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5291         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5292         if (fs_info->quota_root)
5293                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5294         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5295
5296         update_global_block_rsv(fs_info);
5297 }
5298
5299 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5300 {
5301         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5302                                 (u64)-1);
5303         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5304         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5305         WARN_ON(fs_info->trans_block_rsv.size > 0);
5306         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5307         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5308         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5309         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5310         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5311 }
5312
5313 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5314                                   struct btrfs_root *root)
5315 {
5316         if (!trans->block_rsv)
5317                 return;
5318
5319         if (!trans->bytes_reserved)
5320                 return;
5321
5322         trace_btrfs_space_reservation(root->fs_info, "transaction",
5323                                       trans->transid, trans->bytes_reserved, 0);
5324         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5325         trans->bytes_reserved = 0;
5326 }
5327
5328 /*
5329  * To be called after all the new block groups attached to the transaction
5330  * handle have been created (btrfs_create_pending_block_groups()).
5331  */
5332 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5333 {
5334         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5335
5336         if (!trans->chunk_bytes_reserved)
5337                 return;
5338
5339         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5340
5341         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5342                                 trans->chunk_bytes_reserved);
5343         trans->chunk_bytes_reserved = 0;
5344 }
5345
5346 /* Can only return 0 or -ENOSPC */
5347 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5348                                   struct inode *inode)
5349 {
5350         struct btrfs_root *root = BTRFS_I(inode)->root;
5351         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5352         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5353
5354         /*
5355          * We need to hold space in order to delete our orphan item once
5356          * we've added it, so this takes the reservation and holds it until
5357          * we are truly done with the orphan item.
5358          */
5359         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5360         trace_btrfs_space_reservation(root->fs_info, "orphan",
5361                                       btrfs_ino(inode), num_bytes, 1);
5362         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5363 }
5364
5365 void btrfs_orphan_release_metadata(struct inode *inode)
5366 {
5367         struct btrfs_root *root = BTRFS_I(inode)->root;
5368         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5369         trace_btrfs_space_reservation(root->fs_info, "orphan",
5370                                       btrfs_ino(inode), num_bytes, 0);
5371         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5372 }
5373
5374 /*
5375  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5376  * root: the root of the parent directory
5377  * rsv: block reservation
5378  * items: the number of items that we need to reserve
5379  * qgroup_reserved: used to return the reserved size in qgroup
5380  *
5381  * This function is used to reserve the space for snapshot/subvolume
5382  * creation and deletion. Those operations differ from the common
5383  * file/directory operations: they change two fs/file trees and the
5384  * root tree, and the number of items that the qgroup reserves
5385  * differs from the free space reservation. So we can not use the
5386  * space reservation mechanism in start_transaction().
5387  */
5388 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5389                                      struct btrfs_block_rsv *rsv,
5390                                      int items,
5391                                      u64 *qgroup_reserved,
5392                                      bool use_global_rsv)
5393 {
5394         u64 num_bytes;
5395         int ret;
5396         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5397
5398         if (root->fs_info->quota_enabled) {
5399                 /* One for parent inode, two for dir entries */
5400                 num_bytes = 3 * root->nodesize;
5401                 ret = btrfs_qgroup_reserve(root, num_bytes);
5402                 if (ret)
5403                         return ret;
5404         } else {
5405                 num_bytes = 0;
5406         }
5407
5408         *qgroup_reserved = num_bytes;
5409
5410         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5411         rsv->space_info = __find_space_info(root->fs_info,
5412                                             BTRFS_BLOCK_GROUP_METADATA);
5413         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5414                                   BTRFS_RESERVE_FLUSH_ALL);
5415
5416         if (ret == -ENOSPC && use_global_rsv)
5417                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5418
5419         if (ret && *qgroup_reserved)
5420                 btrfs_qgroup_free(root, *qgroup_reserved);
5423
5424         return ret;
5425 }
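
/*
 * Illustrative sketch (not part of the kernel source): with quotas
 * enabled the qgroup charge above is 3 * nodesize, one tree block for
 * the parent inode update and two for the dir entries.  Assuming a
 * (hypothetical) 16K nodesize, extracted as plain C:
 *
 *	u64 nodesize = 16384;            // assumed nodesize
 *	u64 qgroup_bytes = 3 * nodesize; // 49152 bytes charged to qgroup
 *
 * The metadata reservation itself is sized separately via
 * btrfs_calc_trans_metadata_size(root, items).
 */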
5426
5427 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5428                                       struct btrfs_block_rsv *rsv,
5429                                       u64 qgroup_reserved)
5430 {
5431         btrfs_block_rsv_release(root, rsv, (u64)-1);
5432 }
5433
5434 /**
5435  * drop_outstanding_extent - drop an outstanding extent
5436  * @inode: the inode we're dropping the extent for
5437  * @num_bytes: the number of bytes we're releasing.
5438  *
5439  * This is called when we are freeing up an outstanding extent, either called
5440  * after an error or after an extent is written.  This will return the number of
5441  * reserved extents that need to be freed.  This must be called with
5442  * BTRFS_I(inode)->lock held.
5443  */
5444 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5445 {
5446         unsigned drop_inode_space = 0;
5447         unsigned dropped_extents = 0;
5448         unsigned num_extents = 0;
5449
5450         num_extents = (unsigned)div64_u64(num_bytes +
5451                                           BTRFS_MAX_EXTENT_SIZE - 1,
5452                                           BTRFS_MAX_EXTENT_SIZE);
5453         ASSERT(num_extents);
5454         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5455         BTRFS_I(inode)->outstanding_extents -= num_extents;
5456
5457         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5458             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5459                                &BTRFS_I(inode)->runtime_flags))
5460                 drop_inode_space = 1;
5461
5462         /*
5463          * If we have at least as many outstanding extents as reserved
5464          * extents then we need to leave the reserved extents count alone.
5465          */
5466         if (BTRFS_I(inode)->outstanding_extents >=
5467             BTRFS_I(inode)->reserved_extents)
5468                 return drop_inode_space;
5469
5470         dropped_extents = BTRFS_I(inode)->reserved_extents -
5471                 BTRFS_I(inode)->outstanding_extents;
5472         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5473         return dropped_extents + drop_inode_space;
5474 }
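
/*
 * Illustrative sketch (not part of the kernel source): the extent
 * count above is a ceiling division of num_bytes by
 * BTRFS_MAX_EXTENT_SIZE (128MiB at the time of writing).  Extracted
 * as plain C with assumed values:
 *
 *	u64 max_extent = 128ULL * 1024 * 1024;
 *	u64 num_bytes = 300ULL * 1024 * 1024;
 *	unsigned num_extents =
 *		(unsigned)((num_bytes + max_extent - 1) / max_extent);
 *	// ceil(300MiB / 128MiB) == 3 outstanding extents dropped
 */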
5475
5476 /**
5477  * calc_csum_metadata_size - return the amount of metadata space that must be
5478  *      reserved/freed for the given bytes.
5479  * @inode: the inode we're manipulating
5480  * @num_bytes: the number of bytes in question
5481  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5482  *
5483  * This adjusts the number of csum_bytes in the inode and then returns the
5484  * correct amount of metadata that must either be reserved or freed.  We
5485  * calculate how many checksums we can fit into one leaf and then divide the
5486  * number of bytes that will need to be checksummed by this value to figure out
5487  * how many checksums will be required.  If we are adding bytes then the number
5488  * may go up and we will return the number of additional bytes that must be
5489  * reserved.  If it is going down we will return the number of bytes that must
5490  * be freed.
5491  *
5492  * This must be called with BTRFS_I(inode)->lock held.
5493  */
5494 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5495                                    int reserve)
5496 {
5497         struct btrfs_root *root = BTRFS_I(inode)->root;
5498         u64 old_csums, num_csums;
5499
5500         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5501             BTRFS_I(inode)->csum_bytes == 0)
5502                 return 0;
5503
5504         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5505         if (reserve)
5506                 BTRFS_I(inode)->csum_bytes += num_bytes;
5507         else
5508                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5509         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5510
5511         /* No change, no need to reserve more */
5512         if (old_csums == num_csums)
5513                 return 0;
5514
5515         if (reserve)
5516                 return btrfs_calc_trans_metadata_size(root,
5517                                                       num_csums - old_csums);
5518
5519         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5520 }
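
/*
 * Illustrative sketch (not part of the kernel source): only the delta
 * in csum leaves is charged or released.  With hypothetical numbers,
 * if csum_bytes grows from needing 2 leaves to needing 3, we reserve
 * metadata for exactly one extra leaf:
 *
 *	u64 old_leaves = 2, new_leaves = 3;   // assumed leaf counts
 *	if (new_leaves > old_leaves)
 *		reserve = calc_size(new_leaves - old_leaves);  // 1 leaf
 *	else if (new_leaves < old_leaves)
 *		release = calc_size(old_leaves - new_leaves);
 *	// equal counts: nothing to reserve or free
 *
 * calc_size() here stands in for btrfs_calc_trans_metadata_size().
 */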
5521
5522 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5523 {
5524         struct btrfs_root *root = BTRFS_I(inode)->root;
5525         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5526         u64 to_reserve = 0;
5527         u64 csum_bytes;
5528         unsigned nr_extents = 0;
5529         int extra_reserve = 0;
5530         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5531         int ret = 0;
5532         bool delalloc_lock = true;
5533         u64 to_free = 0;
5534         unsigned dropped;
5535
5536         /*
5537          * If we are a free space inode we need to not flush, since we will be
5538          * in the middle of a transaction commit.  We also don't need the delalloc
5539          * mutex since we won't race with anybody; this mostly keeps lockdep quiet.
5540          */
5541         if (btrfs_is_free_space_inode(inode)) {
5542                 flush = BTRFS_RESERVE_NO_FLUSH;
5543                 delalloc_lock = false;
5544         }
5545
5546         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5547             btrfs_transaction_in_commit(root->fs_info))
5548                 schedule_timeout(1);
5549
5550         if (delalloc_lock)
5551                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5552
5553         num_bytes = ALIGN(num_bytes, root->sectorsize);
5554
5555         spin_lock(&BTRFS_I(inode)->lock);
5556         nr_extents = (unsigned)div64_u64(num_bytes +
5557                                          BTRFS_MAX_EXTENT_SIZE - 1,
5558                                          BTRFS_MAX_EXTENT_SIZE);
5559         BTRFS_I(inode)->outstanding_extents += nr_extents;
5560         nr_extents = 0;
5561
5562         if (BTRFS_I(inode)->outstanding_extents >
5563             BTRFS_I(inode)->reserved_extents)
5564                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5565                         BTRFS_I(inode)->reserved_extents;
5566
5567         /*
5568          * Add an item to reserve for updating the inode when we complete the
5569          * delalloc io.
5570          */
5571         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5572                       &BTRFS_I(inode)->runtime_flags)) {
5573                 nr_extents++;
5574                 extra_reserve = 1;
5575         }
5576
5577         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5578         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5579         csum_bytes = BTRFS_I(inode)->csum_bytes;
5580         spin_unlock(&BTRFS_I(inode)->lock);
5581
5582         if (root->fs_info->quota_enabled) {
5583                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5584                 if (ret)
5585                         goto out_fail;
5586         }
5587
5588         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5589         if (unlikely(ret)) {
5590                 if (root->fs_info->quota_enabled)
5591                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5592                 goto out_fail;
5593         }
5594
5595         spin_lock(&BTRFS_I(inode)->lock);
5596         if (extra_reserve) {
5597                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5598                         &BTRFS_I(inode)->runtime_flags);
5599                 nr_extents--;
5600         }
5601         BTRFS_I(inode)->reserved_extents += nr_extents;
5602         spin_unlock(&BTRFS_I(inode)->lock);
5603
5604         if (delalloc_lock)
5605                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5606
5607         if (to_reserve)
5608                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5609                                               btrfs_ino(inode), to_reserve, 1);
5610         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5611
5612         return 0;
5613
5614 out_fail:
5615         spin_lock(&BTRFS_I(inode)->lock);
5616         dropped = drop_outstanding_extent(inode, num_bytes);
5617         /*
5618          * If the inode's csum_bytes is the same as the original
5619          * csum_bytes then we know we haven't raced with any free-ers,
5620          * so we can just reduce our inode's csum bytes and carry on.
5621          */
5622         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5623                 calc_csum_metadata_size(inode, num_bytes, 0);
5624         } else {
5625                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5626                 u64 bytes;
5627
5628                 /*
5629                  * This is tricky, but first we need to figure out how much we
5630                  * freed from any free-ers that occurred during this
5631                  * reservation, so we reset ->csum_bytes to the csum_bytes
5632                  * before we dropped our lock, and then call the free for the
5633                  * number of bytes that were freed while we were trying our
5634                  * reservation.
5635                  */
5636                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5637                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5638                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5639
5641                 /*
5642                  * Now we need to see how much we would have freed had we not
5643                  * been making this reservation and had our ->csum_bytes not been
5644                  * artificially inflated.
5645                  */
5646                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5647                 bytes = csum_bytes - orig_csum_bytes;
5648                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5649
5650                 /*
5651                  * Now reset ->csum_bytes to what it should be.  If bytes is
5652                  * more than to_free then we would have freed more space had we
5653                  * not had an artificially high ->csum_bytes, so we need to free
5654                  * the remainder.  If bytes is the same or less then we don't
5655                  * need to do anything; the other free-ers did the correct
5656                  * thing.
5657                  */
5658                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5659                 if (bytes > to_free)
5660                         to_free = bytes - to_free;
5661                 else
5662                         to_free = 0;
5663         }
5664         spin_unlock(&BTRFS_I(inode)->lock);
5665         if (dropped)
5666                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5667
5668         if (to_free) {
5669                 btrfs_block_rsv_release(root, block_rsv, to_free);
5670                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5671                                               btrfs_ino(inode), to_free, 0);
5672         }
5673         if (delalloc_lock)
5674                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5675         return ret;
5676 }
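
/*
 * Illustrative sketch (not part of the kernel source) of the out_fail
 * csum accounting above.  Let O be csum_bytes before this reservation,
 * N the bytes we added and F the bytes freed by racing free-ers, so
 * the snapshot S = O + N and the current value C = S - F:
 *
 *	actually_freed = leaves(S) - leaves(S - F);     // with inflation
 *	should_have_freed = leaves(O) - leaves(O - F);  // without it
 *	if (should_have_freed > actually_freed)
 *		to_free = should_have_freed - actually_freed;
 *	else
 *		to_free = 0;  // the free-ers already freed enough
 *
 * leaves() stands in for btrfs_csum_bytes_to_leaves() converted to
 * reservation bytes; the inflated S can hide a leaf boundary, which is
 * why the remainder must be freed here.
 */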
5677
5678 /**
5679  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5680  * @inode: the inode to release the reservation for
5681  * @num_bytes: the number of bytes we're releasing
5682  *
5683  * This will release the metadata reservation for an inode.  This can be called
5684  * once we complete IO for a given set of bytes to release their metadata
5685  * reservations.
5686  */
5687 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5688 {
5689         struct btrfs_root *root = BTRFS_I(inode)->root;
5690         u64 to_free = 0;
5691         unsigned dropped;
5692
5693         num_bytes = ALIGN(num_bytes, root->sectorsize);
5694         spin_lock(&BTRFS_I(inode)->lock);
5695         dropped = drop_outstanding_extent(inode, num_bytes);
5696
5697         if (num_bytes)
5698                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5699         spin_unlock(&BTRFS_I(inode)->lock);
5700         if (dropped > 0)
5701                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5702
5703         if (btrfs_test_is_dummy_root(root))
5704                 return;
5705
5706         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5707                                       btrfs_ino(inode), to_free, 0);
5708
5709         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5710                                 to_free);
5711 }
5712
5713 /**
5714  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5715  * @inode: inode we're writing to
5716  * @num_bytes: the number of bytes we want to allocate
5717  *
5718  * This will do the following things
5719  *
5720  * o reserve space in the data space info for num_bytes
5721  * o reserve space in the metadata space info based on number of outstanding
5722  *   extents and how much csums will be needed
5723  * o add to the inodes ->delalloc_bytes
5724  * o add it to the fs_info's delalloc inodes list.
5725  *
5726  * This will return 0 for success and -ENOSPC if there is no space left.
5727  */
5728 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5729 {
5730         int ret;
5731
5732         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5733         if (ret)
5734                 return ret;
5735
5736         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5737         if (ret) {
5738                 btrfs_free_reserved_data_space(inode, num_bytes);
5739                 return ret;
5740         }
5741
5742         return 0;
5743 }
5744
5745 /**
5746  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5747  * @inode: inode we're releasing space for
5748  * @num_bytes: the number of bytes we want to free up
5749  *
5750  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5751  * called in the case that we don't need the metadata AND data reservations
5752  * anymore, such as when there is an error or we insert an inline extent.
5753  *
5754  * This function will release the metadata space that was not used and will
5755  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5756  * list if there are no delalloc bytes left.
5757  */
5758 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5759 {
5760         btrfs_delalloc_release_metadata(inode, num_bytes);
5761         btrfs_free_reserved_data_space(inode, num_bytes);
5762 }
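
/*
 * Illustrative sketch (not part of the kernel source): a typical
 * caller pairs the reserve with either a release of both reservations
 * on failure, or a release of just the metadata once the IO completes.
 * Hypothetical write path, error handling elided:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;            // -ENOSPC: nothing was reserved
 *	ret = do_the_write(inode, num_bytes);  // hypothetical helper
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *	// on success the metadata is released later, after the delalloc
 *	// IO finishes, via btrfs_delalloc_release_metadata()
 */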
5763
5764 static int update_block_group(struct btrfs_trans_handle *trans,
5765                               struct btrfs_root *root, u64 bytenr,
5766                               u64 num_bytes, int alloc)
5767 {
5768         struct btrfs_block_group_cache *cache = NULL;
5769         struct btrfs_fs_info *info = root->fs_info;
5770         u64 total = num_bytes;
5771         u64 old_val;
5772         u64 byte_in_group;
5773         int factor;
5774
5775         /* block accounting for super block */
5776         spin_lock(&info->delalloc_root_lock);
5777         old_val = btrfs_super_bytes_used(info->super_copy);
5778         if (alloc)
5779                 old_val += num_bytes;
5780         else
5781                 old_val -= num_bytes;
5782         btrfs_set_super_bytes_used(info->super_copy, old_val);
5783         spin_unlock(&info->delalloc_root_lock);
5784
5785         while (total) {
5786                 cache = btrfs_lookup_block_group(info, bytenr);
5787                 if (!cache)
5788                         return -ENOENT;
5789                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5790                                     BTRFS_BLOCK_GROUP_RAID1 |
5791                                     BTRFS_BLOCK_GROUP_RAID10))
5792                         factor = 2;
5793                 else
5794                         factor = 1;
5795                 /*
5796                  * If this block group has free space cache written out, we
5797                  * need to make sure to load it if we are removing space.  This
5798                  * is because we need the unpinning stage to actually add the
5799                  * space back to the block group, otherwise we will leak space.
5800                  */
5801                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5802                         cache_block_group(cache, 1);
5803
5804                 byte_in_group = bytenr - cache->key.objectid;
5805                 WARN_ON(byte_in_group > cache->key.offset);
5806
5807                 spin_lock(&cache->space_info->lock);
5808                 spin_lock(&cache->lock);
5809
5810                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5811                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5812                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5813
5814                 old_val = btrfs_block_group_used(&cache->item);
5815                 num_bytes = min(total, cache->key.offset - byte_in_group);
5816                 if (alloc) {
5817                         old_val += num_bytes;
5818                         btrfs_set_block_group_used(&cache->item, old_val);
5819                         cache->reserved -= num_bytes;
5820                         cache->space_info->bytes_reserved -= num_bytes;
5821                         cache->space_info->bytes_used += num_bytes;
5822                         cache->space_info->disk_used += num_bytes * factor;
5823                         spin_unlock(&cache->lock);
5824                         spin_unlock(&cache->space_info->lock);
5825                 } else {
5826                         old_val -= num_bytes;
5827                         btrfs_set_block_group_used(&cache->item, old_val);
5828                         cache->pinned += num_bytes;
5829                         cache->space_info->bytes_pinned += num_bytes;
5830                         cache->space_info->bytes_used -= num_bytes;
5831                         cache->space_info->disk_used -= num_bytes * factor;
5832                         spin_unlock(&cache->lock);
5833                         spin_unlock(&cache->space_info->lock);
5834
5835                         set_extent_dirty(info->pinned_extents,
5836                                          bytenr, bytenr + num_bytes - 1,
5837                                          GFP_NOFS | __GFP_NOFAIL);
5838                         /*
5839                          * No longer have used bytes in this block group, queue
5840                          * it for deletion.
5841                          */
5842                         if (old_val == 0) {
5843                                 spin_lock(&info->unused_bgs_lock);
5844                                 if (list_empty(&cache->bg_list)) {
5845                                         btrfs_get_block_group(cache);
5846                                         list_add_tail(&cache->bg_list,
5847                                                       &info->unused_bgs);
5848                                 }
5849                                 spin_unlock(&info->unused_bgs_lock);
5850                         }
5851                 }
5852
5853                 spin_lock(&trans->transaction->dirty_bgs_lock);
5854                 if (list_empty(&cache->dirty_list)) {
5855                         list_add_tail(&cache->dirty_list,
5856                                       &trans->transaction->dirty_bgs);
5857                         trans->transaction->num_dirty_bgs++;
5858                         btrfs_get_block_group(cache);
5859                 }
5860                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5861
5862                 btrfs_put_block_group(cache);
5863                 total -= num_bytes;
5864                 bytenr += num_bytes;
5865         }
5866         return 0;
5867 }
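
/*
 * Illustrative sketch (not part of the kernel source): disk_used
 * tracks raw bytes on disk, so profiles that keep two copies (DUP,
 * RAID1, RAID10) are charged with factor 2.  Assumed numbers:
 *
 *	u64 num_bytes = 1 << 20;              // 1MiB logical allocation
 *	int factor = 2;                       // e.g. a RAID1 block group
 *	u64 disk_delta = num_bytes * factor;  // 2MiB of raw disk used
 *
 * bytes_used, in contrast, is accounted in logical bytes only.
 */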
5868
5869 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5870 {
5871         struct btrfs_block_group_cache *cache;
5872         u64 bytenr;
5873
5874         spin_lock(&root->fs_info->block_group_cache_lock);
5875         bytenr = root->fs_info->first_logical_byte;
5876         spin_unlock(&root->fs_info->block_group_cache_lock);
5877
5878         if (bytenr < (u64)-1)
5879                 return bytenr;
5880
5881         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5882         if (!cache)
5883                 return 0;
5884
5885         bytenr = cache->key.objectid;
5886         btrfs_put_block_group(cache);
5887
5888         return bytenr;
5889 }
5890
5891 static int pin_down_extent(struct btrfs_root *root,
5892                            struct btrfs_block_group_cache *cache,
5893                            u64 bytenr, u64 num_bytes, int reserved)
5894 {
5895         spin_lock(&cache->space_info->lock);
5896         spin_lock(&cache->lock);
5897         cache->pinned += num_bytes;
5898         cache->space_info->bytes_pinned += num_bytes;
5899         if (reserved) {
5900                 cache->reserved -= num_bytes;
5901                 cache->space_info->bytes_reserved -= num_bytes;
5902         }
5903         spin_unlock(&cache->lock);
5904         spin_unlock(&cache->space_info->lock);
5905
5906         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5907                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5908         if (reserved)
5909                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5910         return 0;
5911 }
5912
5913 /*
5914  * this function must be called within a transaction
5915  */
5916 int btrfs_pin_extent(struct btrfs_root *root,
5917                      u64 bytenr, u64 num_bytes, int reserved)
5918 {
5919         struct btrfs_block_group_cache *cache;
5920
5921         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5922         BUG_ON(!cache); /* Logic error */
5923
5924         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5925
5926         btrfs_put_block_group(cache);
5927         return 0;
5928 }
5929
5930 /*
5931  * this function must be called within a transaction
5932  */
5933 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5934                                     u64 bytenr, u64 num_bytes)
5935 {
5936         struct btrfs_block_group_cache *cache;
5937         int ret;
5938
5939         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5940         if (!cache)
5941                 return -EINVAL;
5942
5943         /*
5944          * pull in the free space cache (if any) so that our pin
5945          * removes the free space from the cache.  We have load_only set
5946          * to one because the slow code to read in the free extents does check
5947          * the pinned extents.
5948          */
5949         cache_block_group(cache, 1);
5950
5951         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5952
5953         /* remove us from the free space cache (if we're there at all) */
5954         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5955         btrfs_put_block_group(cache);
5956         return ret;
5957 }
5958
5959 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5960 {
5961         int ret;
5962         struct btrfs_block_group_cache *block_group;
5963         struct btrfs_caching_control *caching_ctl;
5964
5965         block_group = btrfs_lookup_block_group(root->fs_info, start);
5966         if (!block_group)
5967                 return -EINVAL;
5968
5969         cache_block_group(block_group, 0);
5970         caching_ctl = get_caching_control(block_group);
5971
5972         if (!caching_ctl) {
5973                 /* Logic error */
5974                 BUG_ON(!block_group_cache_done(block_group));
5975                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5976         } else {
5977                 mutex_lock(&caching_ctl->mutex);
5978
5979                 if (start >= caching_ctl->progress) {
5980                         ret = add_excluded_extent(root, start, num_bytes);
5981                 } else if (start + num_bytes <= caching_ctl->progress) {
5982                         ret = btrfs_remove_free_space(block_group,
5983                                                       start, num_bytes);
5984                 } else {
5985                         num_bytes = caching_ctl->progress - start;
5986                         ret = btrfs_remove_free_space(block_group,
5987                                                       start, num_bytes);
5988                         if (ret)
5989                                 goto out_lock;
5990
5991                         num_bytes = (start + num_bytes) -
5992                                 caching_ctl->progress;
5993                         start = caching_ctl->progress;
5994                         ret = add_excluded_extent(root, start, num_bytes);
5995                 }
5996 out_lock:
5997                 mutex_unlock(&caching_ctl->mutex);
5998                 put_caching_control(caching_ctl);
5999         }
6000         btrfs_put_block_group(block_group);
6001         return ret;
6002 }
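
/*
 * Illustrative sketch (not part of the kernel source): a logged range
 * that straddles caching_ctl->progress is handled in two halves.  With
 * assumed numbers, progress = 96K, start = 64K, num_bytes = 64K:
 *
 *	// [64K, 96K) is already cached: take it out of free space
 *	btrfs_remove_free_space(block_group, 64K, 32K);
 *	// [96K, 128K) is not cached yet: exclude it so the caching
 *	// thread never adds it as free space
 *	add_excluded_extent(root, 96K, 32K);
 *
 * (64K etc. stand for the byte values; the real code computes the
 * split from caching_ctl->progress exactly as above.)
 */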
6003
6004 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6005                                  struct extent_buffer *eb)
6006 {
6007         struct btrfs_file_extent_item *item;
6008         struct btrfs_key key;
6009         int found_type;
6010         int i;
6011
6012         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6013                 return 0;
6014
6015         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6016                 btrfs_item_key_to_cpu(eb, &key, i);
6017                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6018                         continue;
6019                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6020                 found_type = btrfs_file_extent_type(eb, item);
6021                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6022                         continue;
6023                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6024                         continue;
6025                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6026                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6027                 __exclude_logged_extent(log, key.objectid, key.offset);
6028         }
6029
6030         return 0;
6031 }
6032
6033 /**
6034  * btrfs_update_reserved_bytes - update the block_group and space info counters
6035  * @cache:      The cache we are manipulating
6036  * @num_bytes:  The number of bytes in question
6037  * @reserve:    One of the reservation enums
6038  * @delalloc:   The blocks are allocated for the delalloc write
6039  *
6040  * This is called by the allocator when it reserves space, or by somebody who is
6041  * freeing space that was never actually used on disk.  For example if you
6042  * reserve some space for a new leaf in transaction A and before transaction A
6043  * commits you free that leaf, you call this with reserve set to 0 in order to
6044  * clear the reservation.
6045  *
6046  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6047  * ENOSPC accounting.  For data we handle the reservation through clearing the
6048  * delalloc bits in the io_tree.  We have to do this since we could end up
6049  * allocating less disk space for the amount of data we have reserved in the
6050  * case of compression.
6051  *
6052  * If this is a reservation and the block group has become read only we cannot
6053  * make the reservation and return -EAGAIN, otherwise this function always
6054  * succeeds.
6055  */
6056 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6057                                        u64 num_bytes, int reserve, int delalloc)
6058 {
6059         struct btrfs_space_info *space_info = cache->space_info;
6060         int ret = 0;
6061
6062         spin_lock(&space_info->lock);
6063         spin_lock(&cache->lock);
6064         if (reserve != RESERVE_FREE) {
6065                 if (cache->ro) {
6066                         ret = -EAGAIN;
6067                 } else {
6068                         cache->reserved += num_bytes;
6069                         space_info->bytes_reserved += num_bytes;
6070                         if (reserve == RESERVE_ALLOC) {
6071                                 trace_btrfs_space_reservation(cache->fs_info,
6072                                                 "space_info", space_info->flags,
6073                                                 num_bytes, 0);
6074                                 space_info->bytes_may_use -= num_bytes;
6075                         }
6076
6077                         if (delalloc)
6078                                 cache->delalloc_bytes += num_bytes;
6079                 }
6080         } else {
6081                 if (cache->ro)
6082                         space_info->bytes_readonly += num_bytes;
6083                 cache->reserved -= num_bytes;
6084                 space_info->bytes_reserved -= num_bytes;
6085
6086                 if (delalloc)
6087                         cache->delalloc_bytes -= num_bytes;
6088         }
6089         spin_unlock(&cache->lock);
6090         spin_unlock(&space_info->lock);
6091         return ret;
6092 }
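
/*
 * Illustrative sketch (not part of the kernel source) of the counter
 * moves above, ignoring the read-only and delalloc cases:
 *
 *	RESERVE_ALLOC:            bytes_may_use -= n; bytes_reserved += n;
 *	RESERVE_ALLOC_NO_ACCOUNT: bytes_reserved += n;  // may_use untouched
 *	RESERVE_FREE:             bytes_reserved -= n;
 *
 * i.e. a normal metadata allocation converts a "may use" reservation
 * into a hard reservation, while data (accounted via the io_tree
 * delalloc bits) skips the bytes_may_use adjustment.
 */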
6093
6094 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6095                                 struct btrfs_root *root)
6096 {
6097         struct btrfs_fs_info *fs_info = root->fs_info;
6098         struct btrfs_caching_control *next;
6099         struct btrfs_caching_control *caching_ctl;
6100         struct btrfs_block_group_cache *cache;
6101
6102         down_write(&fs_info->commit_root_sem);
6103
6104         list_for_each_entry_safe(caching_ctl, next,
6105                                  &fs_info->caching_block_groups, list) {
6106                 cache = caching_ctl->block_group;
6107                 if (block_group_cache_done(cache)) {
6108                         cache->last_byte_to_unpin = (u64)-1;
6109                         list_del_init(&caching_ctl->list);
6110                         put_caching_control(caching_ctl);
6111                 } else {
6112                         cache->last_byte_to_unpin = caching_ctl->progress;
6113                 }
6114         }
6115
6116         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6117                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6118         else
6119                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6120
6121         up_write(&fs_info->commit_root_sem);
6122
6123         update_global_block_rsv(fs_info);
6124 }
6125
6126 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6127                               const bool return_free_space)
6128 {
6129         struct btrfs_fs_info *fs_info = root->fs_info;
6130         struct btrfs_block_group_cache *cache = NULL;
6131         struct btrfs_space_info *space_info;
6132         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6133         u64 len;
6134         bool readonly;
6135
6136         while (start <= end) {
6137                 readonly = false;
6138                 if (!cache ||
6139                     start >= cache->key.objectid + cache->key.offset) {
6140                         if (cache)
6141                                 btrfs_put_block_group(cache);
6142                         cache = btrfs_lookup_block_group(fs_info, start);
6143                         BUG_ON(!cache); /* Logic error */
6144                 }
6145
6146                 len = cache->key.objectid + cache->key.offset - start;
6147                 len = min(len, end + 1 - start);
6148
6149                 if (start < cache->last_byte_to_unpin) {
6150                         len = min(len, cache->last_byte_to_unpin - start);
6151                         if (return_free_space)
6152                                 btrfs_add_free_space(cache, start, len);
6153                 }
6154
6155                 start += len;
6156                 space_info = cache->space_info;
6157
6158                 spin_lock(&space_info->lock);
6159                 spin_lock(&cache->lock);
6160                 cache->pinned -= len;
6161                 space_info->bytes_pinned -= len;
6162                 space_info->max_extent_size = 0;
6163                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6164                 if (cache->ro) {
6165                         space_info->bytes_readonly += len;
6166                         readonly = true;
6167                 }
6168                 spin_unlock(&cache->lock);
6169                 if (!readonly && global_rsv->space_info == space_info) {
6170                         spin_lock(&global_rsv->lock);
6171                         if (!global_rsv->full) {
6172                                 len = min(len, global_rsv->size -
6173                                           global_rsv->reserved);
6174                                 global_rsv->reserved += len;
6175                                 space_info->bytes_may_use += len;
6176                                 if (global_rsv->reserved >= global_rsv->size)
6177                                         global_rsv->full = 1;
6178                         }
6179                         spin_unlock(&global_rsv->lock);
6180                 }
6181                 spin_unlock(&space_info->lock);
6182         }
6183
6184         if (cache)
6185                 btrfs_put_block_group(cache);
6186         return 0;
6187 }
6188
6189 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6190                                struct btrfs_root *root)
6191 {
6192         struct btrfs_fs_info *fs_info = root->fs_info;
6193         struct btrfs_block_group_cache *block_group, *tmp;
6194         struct list_head *deleted_bgs;
6195         struct extent_io_tree *unpin;
6196         u64 start;
6197         u64 end;
6198         int ret;
6199
6200         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6201                 unpin = &fs_info->freed_extents[1];
6202         else
6203                 unpin = &fs_info->freed_extents[0];
6204
6205         while (!trans->aborted) {
6206                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6207                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6208                                             EXTENT_DIRTY, NULL);
6209                 if (ret) {
6210                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6211                         break;
6212                 }
6213
6214                 if (btrfs_test_opt(root, DISCARD))
6215                         ret = btrfs_discard_extent(root, start,
6216                                                    end + 1 - start, NULL);
6217
6218                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6219                 unpin_extent_range(root, start, end, true);
6220                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6221                 cond_resched();
6222         }
6223
6224         /*
6225          * Transaction is finished.  We don't need the lock anymore.  We
6226          * do need to clean up the block groups in case of a transaction
6227          * abort.
6228          */
6229         deleted_bgs = &trans->transaction->deleted_bgs;
6230         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6231                 u64 trimmed = 0;
6232
6233                 ret = -EROFS;
6234                 if (!trans->aborted)
6235                         ret = btrfs_discard_extent(root,
6236                                                    block_group->key.objectid,
6237                                                    block_group->key.offset,
6238                                                    &trimmed);
6239
6240                 list_del_init(&block_group->bg_list);
6241                 btrfs_put_block_group_trimming(block_group);
6242                 btrfs_put_block_group(block_group);
6243
6244                 if (ret) {
6245                         const char *errstr = btrfs_decode_error(ret);
6246                         btrfs_warn(fs_info,
6247                                    "discard failed while removing blockgroup: errno=%d %s",
6248                                    ret, errstr);
6249                 }
6250         }
6251
6252         return 0;
6253 }
6254
6255 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6256                              u64 owner, u64 root_objectid)
6257 {
6258         struct btrfs_space_info *space_info;
6259         u64 flags;
6260
6261         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6262                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6263                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6264                 else
6265                         flags = BTRFS_BLOCK_GROUP_METADATA;
6266         } else {
6267                 flags = BTRFS_BLOCK_GROUP_DATA;
6268         }
6269
6270         space_info = __find_space_info(fs_info, flags);
6271         BUG_ON(!space_info); /* Logic bug */
6272         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6273 }
6274
6276 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6277                                 struct btrfs_root *root,
6278                                 struct btrfs_delayed_ref_node *node, u64 parent,
6279                                 u64 root_objectid, u64 owner_objectid,
6280                                 u64 owner_offset, int refs_to_drop,
6281                                 struct btrfs_delayed_extent_op *extent_op)
6282 {
6283         struct btrfs_key key;
6284         struct btrfs_path *path;
6285         struct btrfs_fs_info *info = root->fs_info;
6286         struct btrfs_root *extent_root = info->extent_root;
6287         struct extent_buffer *leaf;
6288         struct btrfs_extent_item *ei;
6289         struct btrfs_extent_inline_ref *iref;
6290         int ret;
6291         int is_data;
6292         int extent_slot = 0;
6293         int found_extent = 0;
6294         int num_to_del = 1;
6295         int no_quota = node->no_quota;
6296         u32 item_size;
6297         u64 refs;
6298         u64 bytenr = node->bytenr;
6299         u64 num_bytes = node->num_bytes;
6300         int last_ref = 0;
6301         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6302                                                  SKINNY_METADATA);
6303
6304         if (!info->quota_enabled || !is_fstree(root_objectid))
6305                 no_quota = 1;
6306
6307         path = btrfs_alloc_path();
6308         if (!path)
6309                 return -ENOMEM;
6310
6311         path->reada = 1;
6312         path->leave_spinning = 1;
6313
6314         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6315         BUG_ON(!is_data && refs_to_drop != 1);
6316
6317         if (is_data)
6318                 skinny_metadata = 0;
6319
6320         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6321                                     bytenr, num_bytes, parent,
6322                                     root_objectid, owner_objectid,
6323                                     owner_offset);
6324         if (ret == 0) {
6325                 extent_slot = path->slots[0];
6326                 while (extent_slot >= 0) {
6327                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6328                                               extent_slot);
6329                         if (key.objectid != bytenr)
6330                                 break;
6331                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6332                             key.offset == num_bytes) {
6333                                 found_extent = 1;
6334                                 break;
6335                         }
6336                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6337                             key.offset == owner_objectid) {
6338                                 found_extent = 1;
6339                                 break;
6340                         }
6341                         if (path->slots[0] - extent_slot > 5)
6342                                 break;
6343                         extent_slot--;
6344                 }
6345 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6346                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6347                 if (found_extent && item_size < sizeof(*ei))
6348                         found_extent = 0;
6349 #endif
6350                 if (!found_extent) {
6351                         BUG_ON(iref);
6352                         ret = remove_extent_backref(trans, extent_root, path,
6353                                                     NULL, refs_to_drop,
6354                                                     is_data, &last_ref);
6355                         if (ret) {
6356                                 btrfs_abort_transaction(trans, extent_root, ret);
6357                                 goto out;
6358                         }
6359                         btrfs_release_path(path);
6360                         path->leave_spinning = 1;
6361
6362                         key.objectid = bytenr;
6363                         key.type = BTRFS_EXTENT_ITEM_KEY;
6364                         key.offset = num_bytes;
6365
6366                         if (!is_data && skinny_metadata) {
6367                                 key.type = BTRFS_METADATA_ITEM_KEY;
6368                                 key.offset = owner_objectid;
6369                         }
6370
6371                         ret = btrfs_search_slot(trans, extent_root,
6372                                                 &key, path, -1, 1);
6373                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6374                                 /*
6375                                  * Couldn't find our skinny metadata item,
6376                                  * see if we have ye olde extent item.
6377                                  */
6378                                 path->slots[0]--;
6379                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6380                                                       path->slots[0]);
6381                                 if (key.objectid == bytenr &&
6382                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6383                                     key.offset == num_bytes)
6384                                         ret = 0;
6385                         }
6386
6387                         if (ret > 0 && skinny_metadata) {
6388                                 skinny_metadata = false;
6389                                 key.objectid = bytenr;
6390                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6391                                 key.offset = num_bytes;
6392                                 btrfs_release_path(path);
6393                                 ret = btrfs_search_slot(trans, extent_root,
6394                                                         &key, path, -1, 1);
6395                         }
6396
6397                         if (ret) {
6398                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6399                                         ret, bytenr);
6400                                 if (ret > 0)
6401                                         btrfs_print_leaf(extent_root,
6402                                                          path->nodes[0]);
6403                         }
6404                         if (ret < 0) {
6405                                 btrfs_abort_transaction(trans, extent_root, ret);
6406                                 goto out;
6407                         }
6408                         extent_slot = path->slots[0];
6409                 }
6410         } else if (WARN_ON(ret == -ENOENT)) {
6411                 btrfs_print_leaf(extent_root, path->nodes[0]);
6412                 btrfs_err(info,
6413                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6414                         bytenr, parent, root_objectid, owner_objectid,
6415                         owner_offset);
6416                 btrfs_abort_transaction(trans, extent_root, ret);
6417                 goto out;
6418         } else {
6419                 btrfs_abort_transaction(trans, extent_root, ret);
6420                 goto out;
6421         }
6422
6423         leaf = path->nodes[0];
6424         item_size = btrfs_item_size_nr(leaf, extent_slot);
6425 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6426         if (item_size < sizeof(*ei)) {
6427                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6428                 ret = convert_extent_item_v0(trans, extent_root, path,
6429                                              owner_objectid, 0);
6430                 if (ret < 0) {
6431                         btrfs_abort_transaction(trans, extent_root, ret);
6432                         goto out;
6433                 }
6434
6435                 btrfs_release_path(path);
6436                 path->leave_spinning = 1;
6437
6438                 key.objectid = bytenr;
6439                 key.type = BTRFS_EXTENT_ITEM_KEY;
6440                 key.offset = num_bytes;
6441
6442                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6443                                         -1, 1);
6444                 if (ret) {
6445                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6446                                 ret, bytenr);
6447                         btrfs_print_leaf(extent_root, path->nodes[0]);
6448                 }
6449                 if (ret < 0) {
6450                         btrfs_abort_transaction(trans, extent_root, ret);
6451                         goto out;
6452                 }
6453
6454                 extent_slot = path->slots[0];
6455                 leaf = path->nodes[0];
6456                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6457         }
6458 #endif
6459         BUG_ON(item_size < sizeof(*ei));
6460         ei = btrfs_item_ptr(leaf, extent_slot,
6461                             struct btrfs_extent_item);
6462         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6463             key.type == BTRFS_EXTENT_ITEM_KEY) {
6464                 struct btrfs_tree_block_info *bi;
6465                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6466                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6467                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6468         }
6469
6470         refs = btrfs_extent_refs(leaf, ei);
6471         if (refs < refs_to_drop) {
6472                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6473                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6474                 ret = -EINVAL;
6475                 btrfs_abort_transaction(trans, extent_root, ret);
6476                 goto out;
6477         }
6478         refs -= refs_to_drop;
6479
6480         if (refs > 0) {
6481                 if (extent_op)
6482                         __run_delayed_extent_op(extent_op, leaf, ei);
6483                 /*
6484                  * In the case of inline back ref, reference count will
6485                  * be updated by remove_extent_backref
6486                  */
6487                 if (iref) {
6488                         BUG_ON(!found_extent);
6489                 } else {
6490                         btrfs_set_extent_refs(leaf, ei, refs);
6491                         btrfs_mark_buffer_dirty(leaf);
6492                 }
6493                 if (found_extent) {
6494                         ret = remove_extent_backref(trans, extent_root, path,
6495                                                     iref, refs_to_drop,
6496                                                     is_data, &last_ref);
6497                         if (ret) {
6498                                 btrfs_abort_transaction(trans, extent_root, ret);
6499                                 goto out;
6500                         }
6501                 }
6502                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6503                                  root_objectid);
6504         } else {
6505                 if (found_extent) {
6506                         BUG_ON(is_data && refs_to_drop !=
6507                                extent_data_ref_count(path, iref));
6508                         if (iref) {
6509                                 BUG_ON(path->slots[0] != extent_slot);
6510                         } else {
6511                                 BUG_ON(path->slots[0] != extent_slot + 1);
6512                                 path->slots[0] = extent_slot;
6513                                 num_to_del = 2;
6514                         }
6515                 }
6516
6517                 last_ref = 1;
6518                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6519                                       num_to_del);
6520                 if (ret) {
6521                         btrfs_abort_transaction(trans, extent_root, ret);
6522                         goto out;
6523                 }
6524                 btrfs_release_path(path);
6525
6526                 if (is_data) {
6527                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6528                         if (ret) {
6529                                 btrfs_abort_transaction(trans, extent_root, ret);
6530                                 goto out;
6531                         }
6532                 }
6533
6534                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6535                 if (ret) {
6536                         btrfs_abort_transaction(trans, extent_root, ret);
6537                         goto out;
6538                 }
6539         }
6540         btrfs_release_path(path);
6541
6542 out:
6543         btrfs_free_path(path);
6544         return ret;
6545 }
6546
6547 /*
6548  * when we free a block, it is possible (and likely) that we free the last
6549  * delayed ref for that extent as well.  This searches the delayed ref tree for
6550  * a given extent, and if there are no other delayed refs to be processed, it
6551  * removes it from the tree.
6552  */
6553 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6554                                       struct btrfs_root *root, u64 bytenr)
6555 {
6556         struct btrfs_delayed_ref_head *head;
6557         struct btrfs_delayed_ref_root *delayed_refs;
6558         int ret = 0;
6559
6560         delayed_refs = &trans->transaction->delayed_refs;
6561         spin_lock(&delayed_refs->lock);
6562         head = btrfs_find_delayed_ref_head(trans, bytenr);
6563         if (!head)
6564                 goto out_delayed_unlock;
6565
6566         spin_lock(&head->lock);
6567         if (!list_empty(&head->ref_list))
6568                 goto out;
6569
6570         if (head->extent_op) {
6571                 if (!head->must_insert_reserved)
6572                         goto out;
6573                 btrfs_free_delayed_extent_op(head->extent_op);
6574                 head->extent_op = NULL;
6575         }
6576
6577         /*
6578          * waiting for the lock here would deadlock.  If someone else has it
6579          * locked, they are already in the process of dropping it anyway
6580          */
6581         if (!mutex_trylock(&head->mutex))
6582                 goto out;
6583
6584         /*
6585          * at this point we have a head with no other entries.  Go
6586          * ahead and process it.
6587          */
6588         head->node.in_tree = 0;
6589         rb_erase(&head->href_node, &delayed_refs->href_root);
6590
6591         atomic_dec(&delayed_refs->num_entries);
6592
6593         /*
6594          * we don't take a ref on the node because we're removing it from the
6595          * tree, so we just steal the ref the tree was holding.
6596          */
6597         delayed_refs->num_heads--;
6598         if (head->processing == 0)
6599                 delayed_refs->num_heads_ready--;
6600         head->processing = 0;
6601         spin_unlock(&head->lock);
6602         spin_unlock(&delayed_refs->lock);
6603
6604         BUG_ON(head->extent_op);
6605         if (head->must_insert_reserved)
6606                 ret = 1;
6607
6608         mutex_unlock(&head->mutex);
6609         btrfs_put_delayed_ref(&head->node);
6610         return ret;
6611 out:
6612         spin_unlock(&head->lock);
6613
6614 out_delayed_unlock:
6615         spin_unlock(&delayed_refs->lock);
6616         return 0;
6617 }
6618
6619 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6620                            struct btrfs_root *root,
6621                            struct extent_buffer *buf,
6622                            u64 parent, int last_ref)
6623 {
6624         int pin = 1;
6625         int ret;
6626
6627         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6628                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6629                                         buf->start, buf->len,
6630                                         parent, root->root_key.objectid,
6631                                         btrfs_header_level(buf),
6632                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6633                 BUG_ON(ret); /* -ENOMEM */
6634         }
6635
6636         if (!last_ref)
6637                 return;
6638
6639         if (btrfs_header_generation(buf) == trans->transid) {
6640                 struct btrfs_block_group_cache *cache;
6641
6642                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6643                         ret = check_ref_cleanup(trans, root, buf->start);
6644                         if (!ret)
6645                                 goto out;
6646                 }
6647
6648                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6649
6650                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6651                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6652                         btrfs_put_block_group(cache);
6653                         goto out;
6654                 }
6655
6656                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6657
6658                 btrfs_add_free_space(cache, buf->start, buf->len);
6659                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6660                 btrfs_put_block_group(cache);
6661                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6662                 pin = 0;
6663         }
6664 out:
6665         if (pin)
6666                 add_pinned_bytes(root->fs_info, buf->len,
6667                                  btrfs_header_level(buf),
6668                                  root->root_key.objectid);
6669
6670         /*
6671          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6672          * anymore.
6673          */
6674         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6675 }
6676
6677 /* Can return -ENOMEM */
6678 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6679                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6680                       u64 owner, u64 offset, int no_quota)
6681 {
6682         int ret;
6683         struct btrfs_fs_info *fs_info = root->fs_info;
6684
6685         if (btrfs_test_is_dummy_root(root))
6686                 return 0;
6687
6688         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6689
6690         /*
6691          * tree log blocks never actually go into the extent allocation
6692          * tree, just update pinning info and exit early.
6693          */
6694         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6695                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6696                 /* unlocks the pinned mutex */
6697                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6698                 ret = 0;
6699         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6700                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6701                                         num_bytes,
6702                                         parent, root_objectid, (int)owner,
6703                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6704         } else {
6705                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6706                                                 num_bytes,
6707                                                 parent, root_objectid, owner,
6708                                                 offset, BTRFS_DROP_DELAYED_REF,
6709                                                 NULL, no_quota);
6710         }
6711         return ret;
6712 }
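
/*
 * Caller sketch (hypothetical helper, for exposition only): dropping one
 * reference to a file data extent.  An owner >= BTRFS_FIRST_FREE_OBJECTID
 * selects the delayed data ref path above.
 */
static int example_drop_data_ref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, u64 bytenr,
                                 u64 num_bytes, u64 ino, u64 file_offset)
{
        return btrfs_free_extent(trans, root, bytenr, num_bytes, 0,
                                 root->root_key.objectid, ino, file_offset, 0);
}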
6713
6714 /*
6715  * when we wait for progress in the block group caching, it's because
6716  * our allocation attempt failed at least once.  So, we must sleep
6717  * and let some progress happen before we try again.
6718  *
6719  * This function will sleep at least once waiting for new free space to
6720  * show up, and then it will check the block group free space numbers
6721  * for our min num_bytes.  Another option is to have it go ahead
6722  * and look in the rbtree for a free extent of a given size, but this
6723  * is a good start.
6724  *
6725  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6726  * any of the information in this block group.
6727  */
6728 static noinline void
6729 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6730                                 u64 num_bytes)
6731 {
6732         struct btrfs_caching_control *caching_ctl;
6733
6734         caching_ctl = get_caching_control(cache);
6735         if (!caching_ctl)
6736                 return;
6737
6738         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6739                    (cache->free_space_ctl->free_space >= num_bytes));
6740
6741         put_caching_control(caching_ctl);
6742 }
6743
6744 static noinline int
6745 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6746 {
6747         struct btrfs_caching_control *caching_ctl;
6748         int ret = 0;
6749
6750         caching_ctl = get_caching_control(cache);
6751         if (!caching_ctl)
6752                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6753
6754         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6755         if (cache->cached == BTRFS_CACHE_ERROR)
6756                 ret = -EIO;
6757         put_caching_control(caching_ctl);
6758         return ret;
6759 }
6760
6761 int __get_raid_index(u64 flags)
6762 {
6763         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6764                 return BTRFS_RAID_RAID10;
6765         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6766                 return BTRFS_RAID_RAID1;
6767         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6768                 return BTRFS_RAID_DUP;
6769         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6770                 return BTRFS_RAID_RAID0;
6771         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6772                 return BTRFS_RAID_RAID5;
6773         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6774                 return BTRFS_RAID_RAID6;
6775
6776         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6777 }
6778
6779 int get_block_group_index(struct btrfs_block_group_cache *cache)
6780 {
6781         return __get_raid_index(cache->flags);
6782 }
6783
6784 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6785         [BTRFS_RAID_RAID10]     = "raid10",
6786         [BTRFS_RAID_RAID1]      = "raid1",
6787         [BTRFS_RAID_DUP]        = "dup",
6788         [BTRFS_RAID_RAID0]      = "raid0",
6789         [BTRFS_RAID_SINGLE]     = "single",
6790         [BTRFS_RAID_RAID5]      = "raid5",
6791         [BTRFS_RAID_RAID6]      = "raid6",
6792 };
6793
6794 static const char *get_raid_name(enum btrfs_raid_types type)
6795 {
6796         if (type >= BTRFS_NR_RAID_TYPES)
6797                 return NULL;
6798
6799         return btrfs_raid_type_names[type];
6800 }
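
/*
 * Usage sketch (hypothetical helper): mapping a block group's flags to a
 * human readable profile name via the two helpers above.
 */
static void example_print_profile(struct btrfs_block_group_cache *cache)
{
        const char *name = get_raid_name(__get_raid_index(cache->flags));

        if (name)
                printk(KERN_INFO "BTRFS: block group %llu uses profile %s\n",
                       cache->key.objectid, name);
}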
6801
6802 enum btrfs_loop_type {
6803         LOOP_CACHING_NOWAIT = 0,
6804         LOOP_CACHING_WAIT = 1,
6805         LOOP_ALLOC_CHUNK = 2,
6806         LOOP_NO_EMPTY_SIZE = 3,
6807 };
6808
6809 static inline void
6810 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6811                        int delalloc)
6812 {
6813         if (delalloc)
6814                 down_read(&cache->data_rwsem);
6815 }
6816
6817 static inline void
6818 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6819                        int delalloc)
6820 {
6821         btrfs_get_block_group(cache);
6822         if (delalloc)
6823                 down_read(&cache->data_rwsem);
6824 }
6825
6826 static struct btrfs_block_group_cache *
6827 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6828                    struct btrfs_free_cluster *cluster,
6829                    int delalloc)
6830 {
6831         struct btrfs_block_group_cache *used_bg;
6832         bool locked = false;
6833 again:
6834         spin_lock(&cluster->refill_lock);
6835         if (locked) {
6836                 if (used_bg == cluster->block_group)
6837                         return used_bg;
6838
6839                 up_read(&used_bg->data_rwsem);
6840                 btrfs_put_block_group(used_bg);
6841         }
6842
6843         used_bg = cluster->block_group;
6844         if (!used_bg)
6845                 return NULL;
6846
6847         if (used_bg == block_group)
6848                 return used_bg;
6849
6850         btrfs_get_block_group(used_bg);
6851
6852         if (!delalloc)
6853                 return used_bg;
6854
6855         if (down_read_trylock(&used_bg->data_rwsem))
6856                 return used_bg;
6857
6858         spin_unlock(&cluster->refill_lock);
6859         down_read(&used_bg->data_rwsem);
6860         locked = true;
6861         goto again;
6862 }
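
/*
 * Lock-ordering sketch (hypothetical names, for exposition): the dance
 * btrfs_lock_cluster() performs above.  We must not sleep on the rwsem
 * while holding the spinlock, so on contention we drop the spinlock,
 * sleep, and then revalidate everything under the spinlock again.  The
 * caller is responsible for releasing whatever this returns with held.
 */
static void example_lock_dance(spinlock_t *guard, struct rw_semaphore *sem)
{
        bool have_sem = false;
again:
        spin_lock(guard);
        if (have_sem)
                return;         /* both locks held; caller must revalidate */
        if (down_read_trylock(sem))
                return;         /* fast path, no sleeping needed */
        spin_unlock(guard);
        down_read(sem);         /* may sleep, hence the dropped spinlock */
        have_sem = true;
        goto again;
}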
6863
6864 static inline void
6865 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6866                          int delalloc)
6867 {
6868         if (delalloc)
6869                 up_read(&cache->data_rwsem);
6870         btrfs_put_block_group(cache);
6871 }
6872
6873 /*
6874  * walks the btree of allocated extents and finds a hole of a given size.
6875  * The key ins is changed to record the hole:
6876  * ins->objectid == start position
6877  * ins->type == BTRFS_EXTENT_ITEM_KEY
6878  * ins->offset == the size of the hole.
6879  * Any available blocks before search_start are skipped.
6880  *
6881  * If there is no suitable free space, we record the size of the largest
6882  * free extent we saw, so the caller can retry with a smaller request.
6883  */
6884 static noinline int find_free_extent(struct btrfs_root *orig_root,
6885                                      u64 num_bytes, u64 empty_size,
6886                                      u64 hint_byte, struct btrfs_key *ins,
6887                                      u64 flags, int delalloc)
6888 {
6889         int ret = 0;
6890         struct btrfs_root *root = orig_root->fs_info->extent_root;
6891         struct btrfs_free_cluster *last_ptr = NULL;
6892         struct btrfs_block_group_cache *block_group = NULL;
6893         u64 search_start = 0;
6894         u64 max_extent_size = 0;
6895         int empty_cluster = 2 * 1024 * 1024;
6896         struct btrfs_space_info *space_info;
6897         int loop = 0;
6898         int index = __get_raid_index(flags);
6899         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6900                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6901         bool failed_cluster_refill = false;
6902         bool failed_alloc = false;
6903         bool use_cluster = true;
6904         bool have_caching_bg = false;
6905
6906         WARN_ON(num_bytes < root->sectorsize);
6907         ins->type = BTRFS_EXTENT_ITEM_KEY;
6908         ins->objectid = 0;
6909         ins->offset = 0;
6910
6911         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6912
6913         space_info = __find_space_info(root->fs_info, flags);
6914         if (!space_info) {
6915                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6916                 return -ENOSPC;
6917         }
6918
6919         /*
6920          * If our free space is heavily fragmented we may not be able to make
6921          * big contiguous allocations, so instead of doing the expensive search
6922          * for free space, simply return ENOSPC with our max_extent_size so we
6923          * can go ahead and search for a more manageable chunk.
6924          *
6925          * If our max_extent_size is large enough for our allocation, simply
6926          * disable clustering, since we will likely not be able to find enough
6927          * space to create a cluster and would only induce latency trying.
6928          */
6929         if (unlikely(space_info->max_extent_size)) {
6930                 spin_lock(&space_info->lock);
6931                 if (space_info->max_extent_size &&
6932                     num_bytes > space_info->max_extent_size) {
6933                         ins->offset = space_info->max_extent_size;
6934                         spin_unlock(&space_info->lock);
6935                         return -ENOSPC;
6936                 } else if (space_info->max_extent_size) {
6937                         use_cluster = false;
6938                 }
6939                 spin_unlock(&space_info->lock);
6940         }
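        /*
         * Worked example (exposition only): if an earlier search recorded
         * max_extent_size == 1MiB for this space_info and we now ask for
         * 4MiB, we return -ENOSPC right away with ins->offset = 1MiB, so
         * btrfs_reserve_extent() can retry with a request the free space
         * can actually satisfy instead of repeating the full search.
         */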
6941
6942         /*
6943          * If the space info is for both data and metadata it means we have a
6944          * small filesystem and we can't use the clustering stuff.
6945          */
6946         if (btrfs_mixed_space_info(space_info))
6947                 use_cluster = false;
6948
6949         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6950                 last_ptr = &root->fs_info->meta_alloc_cluster;
6951                 if (!btrfs_test_opt(root, SSD))
6952                         empty_cluster = 64 * 1024;
6953         }
6954
6955         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6956             btrfs_test_opt(root, SSD)) {
6957                 last_ptr = &root->fs_info->data_alloc_cluster;
6958         }
6959
6960         if (last_ptr) {
6961                 spin_lock(&last_ptr->lock);
6962                 if (last_ptr->block_group)
6963                         hint_byte = last_ptr->window_start;
6964                 spin_unlock(&last_ptr->lock);
6965         }
6966
6967         search_start = max(search_start, first_logical_byte(root, 0));
6968         search_start = max(search_start, hint_byte);
6969
6970         if (!last_ptr)
6971                 empty_cluster = 0;
6972
6973         if (search_start == hint_byte) {
6974                 block_group = btrfs_lookup_block_group(root->fs_info,
6975                                                        search_start);
6976                 /*
6977                  * we don't want to use the block group if it doesn't match our
6978                  * allocation bits, or if it's not cached.
6979                  *
6980                  * However if we are re-searching with an ideal block group
6981                  * picked out then we don't care that the block group is cached.
6982                  */
6983                 if (block_group && block_group_bits(block_group, flags) &&
6984                     block_group->cached != BTRFS_CACHE_NO) {
6985                         down_read(&space_info->groups_sem);
6986                         if (list_empty(&block_group->list) ||
6987                             block_group->ro) {
6988                                 /*
6989                                  * someone is removing this block group,
6990                                  * we can't jump into the have_block_group
6991                                  * target because our list pointers are not
6992                                  * valid
6993                                  */
6994                                 btrfs_put_block_group(block_group);
6995                                 up_read(&space_info->groups_sem);
6996                         } else {
6997                                 index = get_block_group_index(block_group);
6998                                 btrfs_lock_block_group(block_group, delalloc);
6999                                 goto have_block_group;
7000                         }
7001                 } else if (block_group) {
7002                         btrfs_put_block_group(block_group);
7003                 }
7004         }
7005 search:
7006         have_caching_bg = false;
7007         down_read(&space_info->groups_sem);
7008         list_for_each_entry(block_group, &space_info->block_groups[index],
7009                             list) {
7010                 u64 offset;
7011                 int cached;
7012
7013                 btrfs_grab_block_group(block_group, delalloc);
7014                 search_start = block_group->key.objectid;
7015
7016                 /*
7017                  * this can happen if we end up cycling through all the
7018                  * raid types, but we want to make sure we only allocate
7019                  * for the proper type.
7020                  */
7021                 if (!block_group_bits(block_group, flags)) {
7022                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7023                                     BTRFS_BLOCK_GROUP_RAID1 |
7024                                     BTRFS_BLOCK_GROUP_RAID5 |
7025                                     BTRFS_BLOCK_GROUP_RAID6 |
7026                                     BTRFS_BLOCK_GROUP_RAID10;
7027
7028                         /*
7029                          * if they asked for extra copies and this block group
7030                          * doesn't provide them, bail.  This does allow us to
7031                          * fill raid0 from raid1.
7032                          */
7033                         if ((flags & extra) && !(block_group->flags & extra))
7034                                 goto loop;
7035                 }
7036
7037 have_block_group:
7038                 cached = block_group_cache_done(block_group);
7039                 if (unlikely(!cached)) {
7040                         ret = cache_block_group(block_group, 0);
7041                         BUG_ON(ret < 0);
7042                         ret = 0;
7043                 }
7044
7045                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7046                         goto loop;
7047                 if (unlikely(block_group->ro))
7048                         goto loop;
7049
7050                 /*
7051                  * OK, we want to try and use the cluster allocator, so
7052                  * let's look there
7053                  */
7054                 if (last_ptr) {
7055                         struct btrfs_block_group_cache *used_block_group;
7056                         unsigned long aligned_cluster;
7057                         /*
7058                          * the refill lock keeps out other
7059                          * people trying to start a new cluster
7060                          */
7061                         used_block_group = btrfs_lock_cluster(block_group,
7062                                                               last_ptr,
7063                                                               delalloc);
7064                         if (!used_block_group)
7065                                 goto refill_cluster;
7066
7067                         if (used_block_group != block_group &&
7068                             (used_block_group->ro ||
7069                              !block_group_bits(used_block_group, flags)))
7070                                 goto release_cluster;
7071
7072                         offset = btrfs_alloc_from_cluster(used_block_group,
7073                                                 last_ptr,
7074                                                 num_bytes,
7075                                                 used_block_group->key.objectid,
7076                                                 &max_extent_size);
7077                         if (offset) {
7078                                 /* we have a block, we're done */
7079                                 spin_unlock(&last_ptr->refill_lock);
7080                                 trace_btrfs_reserve_extent_cluster(root,
7081                                                 used_block_group,
7082                                                 search_start, num_bytes);
7083                                 if (used_block_group != block_group) {
7084                                         btrfs_release_block_group(block_group,
7085                                                                   delalloc);
7086                                         block_group = used_block_group;
7087                                 }
7088                                 goto checks;
7089                         }
7090
7091                         WARN_ON(last_ptr->block_group != used_block_group);
7092 release_cluster:
7093                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7094                          * set up a new cluster, so let's just skip it
7095                          * and let the allocator find whatever block
7096                          * it can find.  If we reach this point, we
7097                          * will have tried the cluster allocator
7098                          * plenty of times and not have found
7099                          * anything, so we are likely way too
7100                          * fragmented for the clustering stuff to find
7101                          * anything.
7102                          *
7103                          * However, if the cluster is taken from the
7104                          * current block group, release the cluster
7105                          * first, so that we stand a better chance of
7106                          * succeeding in the unclustered
7107                          * allocation.  */
7108                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7109                             used_block_group != block_group) {
7110                                 spin_unlock(&last_ptr->refill_lock);
7111                                 btrfs_release_block_group(used_block_group,
7112                                                           delalloc);
7113                                 goto unclustered_alloc;
7114                         }
7115
7116                         /*
7117                          * this cluster didn't work out, free it and
7118                          * start over
7119                          */
7120                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7121
7122                         if (used_block_group != block_group)
7123                                 btrfs_release_block_group(used_block_group,
7124                                                           delalloc);
7125 refill_cluster:
7126                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7127                                 spin_unlock(&last_ptr->refill_lock);
7128                                 goto unclustered_alloc;
7129                         }
7130
7131                         aligned_cluster = max_t(unsigned long,
7132                                                 empty_cluster + empty_size,
7133                                               block_group->full_stripe_len);
7134
7135                         /* allocate a cluster in this block group */
7136                         ret = btrfs_find_space_cluster(root, block_group,
7137                                                        last_ptr, search_start,
7138                                                        num_bytes,
7139                                                        aligned_cluster);
7140                         if (ret == 0) {
7141                                 /*
7142                                  * now pull our allocation out of this
7143                                  * cluster
7144                                  */
7145                                 offset = btrfs_alloc_from_cluster(block_group,
7146                                                         last_ptr,
7147                                                         num_bytes,
7148                                                         search_start,
7149                                                         &max_extent_size);
7150                                 if (offset) {
7151                                         /* we found one, proceed */
7152                                         spin_unlock(&last_ptr->refill_lock);
7153                                         trace_btrfs_reserve_extent_cluster(root,
7154                                                 block_group, search_start,
7155                                                 num_bytes);
7156                                         goto checks;
7157                                 }
7158                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7159                                    && !failed_cluster_refill) {
7160                                 spin_unlock(&last_ptr->refill_lock);
7161
7162                                 failed_cluster_refill = true;
7163                                 wait_block_group_cache_progress(block_group,
7164                                        num_bytes + empty_cluster + empty_size);
7165                                 goto have_block_group;
7166                         }
7167
7168                         /*
7169                          * at this point we either didn't find a cluster
7170                          * or we weren't able to allocate a block from our
7171                          * cluster.  Free the cluster we've been trying
7172                          * to use, and go to the next block group
7173                          */
7174                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7175                         spin_unlock(&last_ptr->refill_lock);
7176                         goto loop;
7177                 }
7178
7179 unclustered_alloc:
7180                 spin_lock(&block_group->free_space_ctl->tree_lock);
7181                 if (cached &&
7182                     block_group->free_space_ctl->free_space <
7183                     num_bytes + empty_cluster + empty_size) {
7184                         if (block_group->free_space_ctl->free_space >
7185                             max_extent_size)
7186                                 max_extent_size =
7187                                         block_group->free_space_ctl->free_space;
7188                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7189                         goto loop;
7190                 }
7191                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7192
7193                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7194                                                     num_bytes, empty_size,
7195                                                     &max_extent_size);
7196                 /*
7197                  * If we didn't find a chunk, and we haven't failed on this
7198                  * block group before, and this block group is in the middle of
7199                  * caching and we are ok with waiting, then go ahead and wait
7200                  * for progress to be made, and set failed_alloc to true.
7201                  *
7202                  * If failed_alloc is true then we've already waited on this
7203                  * block group once and should move on to the next block group.
7204                  */
7205                 if (!offset && !failed_alloc && !cached &&
7206                     loop > LOOP_CACHING_NOWAIT) {
7207                         wait_block_group_cache_progress(block_group,
7208                                                 num_bytes + empty_size);
7209                         failed_alloc = true;
7210                         goto have_block_group;
7211                 } else if (!offset) {
7212                         if (!cached)
7213                                 have_caching_bg = true;
7214                         goto loop;
7215                 }
7216 checks:
7217                 search_start = ALIGN(offset, root->stripesize);
7218
7219                 /* move on to the next group */
7220                 if (search_start + num_bytes >
7221                     block_group->key.objectid + block_group->key.offset) {
7222                         btrfs_add_free_space(block_group, offset, num_bytes);
7223                         goto loop;
7224                 }
7225
7226                 if (offset < search_start)
7227                         btrfs_add_free_space(block_group, offset,
7228                                              search_start - offset);
7229                 BUG_ON(offset > search_start);
7230
7231                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7232                                                   alloc_type, delalloc);
7233                 if (ret == -EAGAIN) {
7234                         btrfs_add_free_space(block_group, offset, num_bytes);
7235                         goto loop;
7236                 }
7237
7238                 /* we are all good, lets return */
7239                 ins->objectid = search_start;
7240                 ins->offset = num_bytes;
7241
7242                 trace_btrfs_reserve_extent(orig_root, block_group,
7243                                            search_start, num_bytes);
7244                 btrfs_release_block_group(block_group, delalloc);
7245                 break;
7246 loop:
7247                 failed_cluster_refill = false;
7248                 failed_alloc = false;
7249                 BUG_ON(index != get_block_group_index(block_group));
7250                 btrfs_release_block_group(block_group, delalloc);
7251         }
7252         up_read(&space_info->groups_sem);
7253
7254         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7255                 goto search;
7256
7257         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7258                 goto search;
7259
7260         /*
7261          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7262          *                      caching kthreads as we move along
7263          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7264          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7265          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7266          *                      again
7267          */
7268         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7269                 index = 0;
7270                 loop++;
7271                 if (loop == LOOP_ALLOC_CHUNK) {
7272                         struct btrfs_trans_handle *trans;
7273                         int exist = 0;
7274
7275                         trans = current->journal_info;
7276                         if (trans)
7277                                 exist = 1;
7278                         else
7279                                 trans = btrfs_join_transaction(root);
7280
7281                         if (IS_ERR(trans)) {
7282                                 ret = PTR_ERR(trans);
7283                                 goto out;
7284                         }
7285
7286                         ret = do_chunk_alloc(trans, root, flags,
7287                                              CHUNK_ALLOC_FORCE);
7288                         /*
7289                          * Do not bail out on ENOSPC, since a later
7290                          * LOOP_NO_EMPTY_SIZE pass may still succeed.
7291                          */
7292                         if (ret < 0 && ret != -ENOSPC)
7293                                 btrfs_abort_transaction(trans,
7294                                                         root, ret);
7295                         else
7296                                 ret = 0;
7297                         if (!exist)
7298                                 btrfs_end_transaction(trans, root);
7299                         if (ret)
7300                                 goto out;
7301                 }
7302
7303                 if (loop == LOOP_NO_EMPTY_SIZE) {
7304                         empty_size = 0;
7305                         empty_cluster = 0;
7306                 }
7307
7308                 goto search;
7309         } else if (!ins->objectid) {
7310                 ret = -ENOSPC;
7311         } else if (ins->objectid) {
7312                 ret = 0;
7313         }
7314 out:
7315         if (ret == -ENOSPC) {
7316                 spin_lock(&space_info->lock);
7317                 space_info->max_extent_size = max_extent_size;
7318                 spin_unlock(&space_info->lock);
7319                 ins->offset = max_extent_size;
7320         }
7321         return ret;
7322 }
7323
7324 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7325                             int dump_block_groups)
7326 {
7327         struct btrfs_block_group_cache *cache;
7328         int index = 0;
7329
7330         spin_lock(&info->lock);
7331         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7332                info->flags,
7333                info->total_bytes - info->bytes_used - info->bytes_pinned -
7334                info->bytes_reserved - info->bytes_readonly,
7335                (info->full) ? "" : "not ");
7336         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7337                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7338                info->total_bytes, info->bytes_used, info->bytes_pinned,
7339                info->bytes_reserved, info->bytes_may_use,
7340                info->bytes_readonly);
7341         spin_unlock(&info->lock);
7342
7343         if (!dump_block_groups)
7344                 return;
7345
7346         down_read(&info->groups_sem);
7347 again:
7348         list_for_each_entry(cache, &info->block_groups[index], list) {
7349                 spin_lock(&cache->lock);
7350                 printk(KERN_INFO "BTRFS: "
7351                            "block group %llu has %llu bytes, "
7352                            "%llu used %llu pinned %llu reserved %s\n",
7353                        cache->key.objectid, cache->key.offset,
7354                        btrfs_block_group_used(&cache->item), cache->pinned,
7355                        cache->reserved, cache->ro ? "[readonly]" : "");
7356                 btrfs_dump_free_space(cache, bytes);
7357                 spin_unlock(&cache->lock);
7358         }
7359         if (++index < BTRFS_NR_RAID_TYPES)
7360                 goto again;
7361         up_read(&info->groups_sem);
7362 }
7363
7364 int btrfs_reserve_extent(struct btrfs_root *root,
7365                          u64 num_bytes, u64 min_alloc_size,
7366                          u64 empty_size, u64 hint_byte,
7367                          struct btrfs_key *ins, int is_data, int delalloc)
7368 {
7369         bool final_tried = num_bytes == min_alloc_size;
7370         u64 flags;
7371         int ret;
7372
7373         flags = btrfs_get_alloc_profile(root, is_data);
7374 again:
7375         WARN_ON(num_bytes < root->sectorsize);
7376         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7377                                flags, delalloc);
7378
7379         if (ret == -ENOSPC) {
7380                 if (!final_tried && ins->offset) {
7381                         num_bytes = min(num_bytes >> 1, ins->offset);
7382                         num_bytes = round_down(num_bytes, root->sectorsize);
7383                         num_bytes = max(num_bytes, min_alloc_size);
7384                         if (num_bytes == min_alloc_size)
7385                                 final_tried = true;
7386                         goto again;
7387                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7388                         struct btrfs_space_info *sinfo;
7389
7390                         sinfo = __find_space_info(root->fs_info, flags);
7391                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7392                                 flags, num_bytes);
7393                         if (sinfo)
7394                                 dump_space_info(sinfo, num_bytes, 1);
7395                 }
7396         }
7397
7398         return ret;
7399 }
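
/*
 * Caller sketch (hypothetical helper): reserving a data extent with
 * fallback.  On -ENOSPC with a non-zero ins->offset, the retry loop above
 * halves the request (never below min_alloc_size), so the caller may get a
 * smaller extent than it first asked for.
 */
static int example_reserve_data(struct btrfs_root *root, u64 want,
                                u64 min_size, struct btrfs_key *ins)
{
        return btrfs_reserve_extent(root, want, min_size, 0, 0, ins, 1, 0);
}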
7400
7401 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7402                                         u64 start, u64 len,
7403                                         int pin, int delalloc)
7404 {
7405         struct btrfs_block_group_cache *cache;
7406         int ret = 0;
7407
7408         cache = btrfs_lookup_block_group(root->fs_info, start);
7409         if (!cache) {
7410                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7411                         start);
7412                 return -ENOSPC;
7413         }
7414
7415         if (pin)
7416                 pin_down_extent(root, cache, start, len, 1);
7417         else {
7418                 if (btrfs_test_opt(root, DISCARD))
7419                         ret = btrfs_discard_extent(root, start, len, NULL);
7420                 btrfs_add_free_space(cache, start, len);
7421                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7422         }
7423
7424         btrfs_put_block_group(cache);
7425
7426         trace_btrfs_reserved_extent_free(root, start, len);
7427
7428         return ret;
7429 }
7430
7431 int btrfs_free_reserved_extent(struct btrfs_root *root,
7432                                u64 start, u64 len, int delalloc)
7433 {
7434         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7435 }
7436
7437 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7438                                        u64 start, u64 len)
7439 {
7440         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7441 }
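
/*
 * Error-path sketch (hypothetical helper): a caller that reserved an extent
 * but failed before inserting any reference to it simply hands the space
 * back, exactly as btrfs_alloc_tree_block() does below on its error path.
 */
static void example_undo_reservation(struct btrfs_root *root,
                                     struct btrfs_key *ins, int delalloc)
{
        btrfs_free_reserved_extent(root, ins->objectid, ins->offset, delalloc);
}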
7442
7443 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7444                                       struct btrfs_root *root,
7445                                       u64 parent, u64 root_objectid,
7446                                       u64 flags, u64 owner, u64 offset,
7447                                       struct btrfs_key *ins, int ref_mod)
7448 {
7449         int ret;
7450         struct btrfs_fs_info *fs_info = root->fs_info;
7451         struct btrfs_extent_item *extent_item;
7452         struct btrfs_extent_inline_ref *iref;
7453         struct btrfs_path *path;
7454         struct extent_buffer *leaf;
7455         int type;
7456         u32 size;
7457
7458         if (parent > 0)
7459                 type = BTRFS_SHARED_DATA_REF_KEY;
7460         else
7461                 type = BTRFS_EXTENT_DATA_REF_KEY;
7462
7463         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7464
7465         path = btrfs_alloc_path();
7466         if (!path)
7467                 return -ENOMEM;
7468
7469         path->leave_spinning = 1;
7470         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7471                                       ins, size);
7472         if (ret) {
7473                 btrfs_free_path(path);
7474                 return ret;
7475         }
7476
7477         leaf = path->nodes[0];
7478         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7479                                      struct btrfs_extent_item);
7480         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7481         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7482         btrfs_set_extent_flags(leaf, extent_item,
7483                                flags | BTRFS_EXTENT_FLAG_DATA);
7484
7485         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7486         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7487         if (parent > 0) {
7488                 struct btrfs_shared_data_ref *ref;
7489                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7490                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7491                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7492         } else {
7493                 struct btrfs_extent_data_ref *ref;
7494                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7495                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7496                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7497                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7498                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7499         }
7500
7501         btrfs_mark_buffer_dirty(path->nodes[0]);
7502         btrfs_free_path(path);
7503
7504         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7505         if (ret) { /* -ENOENT, logic error */
7506                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7507                         ins->objectid, ins->offset);
7508                 BUG();
7509         }
7510         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7511         return ret;
7512 }
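
/*
 * Rough on-disk layout of the item built above (exposition only):
 *
 *   struct btrfs_extent_item | inline ref header | ref body
 *
 * where the ref body is a btrfs_shared_data_ref when parent > 0 and a
 * btrfs_extent_data_ref otherwise, matching the two branches above.
 */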
7513
7514 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7515                                      struct btrfs_root *root,
7516                                      u64 parent, u64 root_objectid,
7517                                      u64 flags, struct btrfs_disk_key *key,
7518                                      int level, struct btrfs_key *ins,
7519                                      int no_quota)
7520 {
7521         int ret;
7522         struct btrfs_fs_info *fs_info = root->fs_info;
7523         struct btrfs_extent_item *extent_item;
7524         struct btrfs_tree_block_info *block_info;
7525         struct btrfs_extent_inline_ref *iref;
7526         struct btrfs_path *path;
7527         struct extent_buffer *leaf;
7528         u32 size = sizeof(*extent_item) + sizeof(*iref);
7529         u64 num_bytes = ins->offset;
7530         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7531                                                  SKINNY_METADATA);
7532
7533         if (!skinny_metadata)
7534                 size += sizeof(*block_info);
7535
7536         path = btrfs_alloc_path();
7537         if (!path) {
7538                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7539                                                    root->nodesize);
7540                 return -ENOMEM;
7541         }
7542
7543         path->leave_spinning = 1;
7544         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7545                                       ins, size);
7546         if (ret) {
7547                 btrfs_free_path(path);
7548                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7549                                                    root->nodesize);
7550                 return ret;
7551         }
7552
7553         leaf = path->nodes[0];
7554         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7555                                      struct btrfs_extent_item);
7556         btrfs_set_extent_refs(leaf, extent_item, 1);
7557         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7558         btrfs_set_extent_flags(leaf, extent_item,
7559                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7560
7561         if (skinny_metadata) {
7562                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7563                 num_bytes = root->nodesize;
7564         } else {
7565                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7566                 btrfs_set_tree_block_key(leaf, block_info, key);
7567                 btrfs_set_tree_block_level(leaf, block_info, level);
7568                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7569         }
7570
7571         if (parent > 0) {
7572                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7573                 btrfs_set_extent_inline_ref_type(leaf, iref,
7574                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7575                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7576         } else {
7577                 btrfs_set_extent_inline_ref_type(leaf, iref,
7578                                                  BTRFS_TREE_BLOCK_REF_KEY);
7579                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7580         }
7581
7582         btrfs_mark_buffer_dirty(leaf);
7583         btrfs_free_path(path);
7584
7585         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7586                                  1);
7587         if (ret) { /* -ENOENT, logic error */
7588                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7589                         ins->objectid, ins->offset);
7590                 BUG();
7591         }
7592
7593         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7594         return ret;
7595 }
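
/*
 * Note (exposition only): with the SKINNY_METADATA incompat flag the item
 * above omits struct btrfs_tree_block_info entirely; the inline ref starts
 * right after the extent item and the block size is taken from nodesize
 * rather than from ins->offset, matching the two branches above.
 */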
7596
7597 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7598                                      struct btrfs_root *root,
7599                                      u64 root_objectid, u64 owner,
7600                                      u64 offset, struct btrfs_key *ins)
7601 {
7602         int ret;
7603
7604         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7605
7606         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7607                                          ins->offset, 0,
7608                                          root_objectid, owner, offset,
7609                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7610         return ret;
7611 }
7612
7613 /*
7614  * this is used by the tree logging recovery code.  It records that
7615  * an extent has been allocated and makes sure to clear the free
7616  * space cache bits as well
7617  */
7618 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7619                                    struct btrfs_root *root,
7620                                    u64 root_objectid, u64 owner, u64 offset,
7621                                    struct btrfs_key *ins)
7622 {
7623         int ret;
7624         struct btrfs_block_group_cache *block_group;
7625
7626         /*
7627          * Mixed block groups will exclude before processing the log so we only
7628          * need to do the exclude dance if this fs isn't mixed.
7629          */
7630         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7631                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7632                 if (ret)
7633                         return ret;
7634         }
7635
7636         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7637         if (!block_group)
7638                 return -EINVAL;
7639
7640         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7641                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7642         BUG_ON(ret); /* logic error */
7643         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7644                                          0, owner, offset, ins, 1);
7645         btrfs_put_block_group(block_group);
7646         return ret;
7647 }
7648
7649 static struct extent_buffer *
7650 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7651                       u64 bytenr, int level)
7652 {
7653         struct extent_buffer *buf;
7654
7655         buf = btrfs_find_create_tree_block(root, bytenr);
7656         if (!buf)
7657                 return ERR_PTR(-ENOMEM);
7658         btrfs_set_header_generation(buf, trans->transid);
7659         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7660         btrfs_tree_lock(buf);
7661         clean_tree_block(trans, root->fs_info, buf);
7662         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7663
7664         btrfs_set_lock_blocking(buf);
7665         btrfs_set_buffer_uptodate(buf);
7666
7667         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7668                 buf->log_index = root->log_transid % 2;
7669                 /*
7670                  * we allow two log transactions at a time, and use different
7671                  * EXTENT bits to differentiate dirty pages.
7672                  */
7673                 if (buf->log_index == 0)
7674                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7675                                         buf->start + buf->len - 1, GFP_NOFS);
7676                 else
7677                         set_extent_new(&root->dirty_log_pages, buf->start,
7678                                         buf->start + buf->len - 1, GFP_NOFS);
7679         } else {
7680                 buf->log_index = -1;
7681                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7682                          buf->start + buf->len - 1, GFP_NOFS);
7683         }
7684         trans->blocks_used++;
7685         /* this returns a buffer locked for blocking */
7686         return buf;
7687 }
7688
7689 static struct btrfs_block_rsv *
7690 use_block_rsv(struct btrfs_trans_handle *trans,
7691               struct btrfs_root *root, u32 blocksize)
7692 {
7693         struct btrfs_block_rsv *block_rsv;
7694         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7695         int ret;
7696         bool global_updated = false;
7697
7698         block_rsv = get_block_rsv(trans, root);
7699
7700         if (unlikely(block_rsv->size == 0))
7701                 goto try_reserve;
7702 again:
7703         ret = block_rsv_use_bytes(block_rsv, blocksize);
7704         if (!ret)
7705                 return block_rsv;
7706
7707         if (block_rsv->failfast)
7708                 return ERR_PTR(ret);
7709
7710         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7711                 global_updated = true;
7712                 update_global_block_rsv(root->fs_info);
7713                 goto again;
7714         }
7715
7716         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7717                 static DEFINE_RATELIMIT_STATE(_rs,
7718                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7719                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7720                 if (__ratelimit(&_rs))
7721                         WARN(1, KERN_DEBUG
7722                                 "BTRFS: block rsv returned %d\n", ret);
7723         }
7724 try_reserve:
7725         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7726                                      BTRFS_RESERVE_NO_FLUSH);
7727         if (!ret)
7728                 return block_rsv;
7729         /*
7730          * If we couldn't reserve metadata bytes try and use some from
7731          * the global reserve if its space info is the same as the global
7732          * reserve's.
7733          */
7734         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7735             block_rsv->space_info == global_rsv->space_info) {
7736                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7737                 if (!ret)
7738                         return global_rsv;
7739         }
7740         return ERR_PTR(ret);
7741 }
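
/*
 * Summary of the fallback order above (exposition only):
 *   1) consume from the transaction's own block rsv;
 *   2) if that was the global rsv, refresh it once and retry;
 *   3) reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH;
 *   4) as a last resort, steal from the global rsv when both reserves
 *      share the same space_info.
 */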
7742
7743 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7744                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7745 {
7746         block_rsv_add_bytes(block_rsv, blocksize, 0);
7747         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7748 }
7749
7750 /*
7751  * finds a free extent and does all the dirty work required for allocation.
7752  * Returns the tree buffer or an ERR_PTR on error.
7753  */
7754 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7755                                         struct btrfs_root *root,
7756                                         u64 parent, u64 root_objectid,
7757                                         struct btrfs_disk_key *key, int level,
7758                                         u64 hint, u64 empty_size)
7759 {
7760         struct btrfs_key ins;
7761         struct btrfs_block_rsv *block_rsv;
7762         struct extent_buffer *buf;
7763         struct btrfs_delayed_extent_op *extent_op;
7764         u64 flags = 0;
7765         int ret;
7766         u32 blocksize = root->nodesize;
7767         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7768                                                  SKINNY_METADATA);
7769
7770         if (btrfs_test_is_dummy_root(root)) {
7771                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7772                                             level);
7773                 if (!IS_ERR(buf))
7774                         root->alloc_bytenr += blocksize;
7775                 return buf;
7776         }
7777
7778         block_rsv = use_block_rsv(trans, root, blocksize);
7779         if (IS_ERR(block_rsv))
7780                 return ERR_CAST(block_rsv);
7781
7782         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7783                                    empty_size, hint, &ins, 0, 0);
7784         if (ret)
7785                 goto out_unuse;
7786
7787         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7788         if (IS_ERR(buf)) {
7789                 ret = PTR_ERR(buf);
7790                 goto out_free_reserved;
7791         }
7792
7793         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7794                 if (parent == 0)
7795                         parent = ins.objectid;
7796                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7797         } else
7798                 BUG_ON(parent > 0);
7799
7800         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7801                 extent_op = btrfs_alloc_delayed_extent_op();
7802                 if (!extent_op) {
7803                         ret = -ENOMEM;
7804                         goto out_free_buf;
7805                 }
7806                 if (key)
7807                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7808                 else
7809                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7810                 extent_op->flags_to_set = flags;
7811                 if (skinny_metadata)
7812                         extent_op->update_key = 0;
7813                 else
7814                         extent_op->update_key = 1;
7815                 extent_op->update_flags = 1;
7816                 extent_op->is_data = 0;
7817                 extent_op->level = level;
7818
7819                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7820                                                  ins.objectid, ins.offset,
7821                                                  parent, root_objectid, level,
7822                                                  BTRFS_ADD_DELAYED_EXTENT,
7823                                                  extent_op, 0);
7824                 if (ret)
7825                         goto out_free_delayed;
7826         }
7827         return buf;
7828
7829 out_free_delayed:
7830         btrfs_free_delayed_extent_op(extent_op);
7831 out_free_buf:
7832         free_extent_buffer(buf);
7833 out_free_reserved:
7834         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7835 out_unuse:
7836         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7837         return ERR_PTR(ret);
7838 }
7839
7840 struct walk_control {
7841         u64 refs[BTRFS_MAX_LEVEL];
7842         u64 flags[BTRFS_MAX_LEVEL];
7843         struct btrfs_key update_progress;
7844         int stage;
7845         int level;
7846         int shared_level;
7847         int update_ref;
7848         int keep_locks;
7849         int reada_slot;
7850         int reada_count;
7851         int for_reloc;
7852 };
7853
7854 #define DROP_REFERENCE  1
7855 #define UPDATE_BACKREF  2
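
/*
 * Rough meaning of the two stages (exposition only): in DROP_REFERENCE we
 * walk down the tree dropping our references to shared blocks, while
 * UPDATE_BACKREF is used when blocks must first have their backrefs
 * converted to full backrefs before the drop can continue.
 */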
7856
7857 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7858                                      struct btrfs_root *root,
7859                                      struct walk_control *wc,
7860                                      struct btrfs_path *path)
7861 {
7862         u64 bytenr;
7863         u64 generation;
7864         u64 refs;
7865         u64 flags;
7866         u32 nritems;
7867         u32 blocksize;
7868         struct btrfs_key key;
7869         struct extent_buffer *eb;
7870         int ret;
7871         int slot;
7872         int nread = 0;
7873
7874         if (path->slots[wc->level] < wc->reada_slot) {
7875                 wc->reada_count = wc->reada_count * 2 / 3;
7876                 wc->reada_count = max(wc->reada_count, 2);
7877         } else {
7878                 wc->reada_count = wc->reada_count * 3 / 2;
7879                 wc->reada_count = min_t(int, wc->reada_count,
7880                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7881         }
7882
7883         eb = path->nodes[wc->level];
7884         nritems = btrfs_header_nritems(eb);
7885         blocksize = root->nodesize;
7886
7887         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7888                 if (nread >= wc->reada_count)
7889                         break;
7890
7891                 cond_resched();
7892                 bytenr = btrfs_node_blockptr(eb, slot);
7893                 generation = btrfs_node_ptr_generation(eb, slot);
7894
7895                 if (slot == path->slots[wc->level])
7896                         goto reada;
7897
7898                 if (wc->stage == UPDATE_BACKREF &&
7899                     generation <= root->root_key.offset)
7900                         continue;
7901
7902                 /* We don't lock the tree block, it's OK to be racy here */
7903                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7904                                                wc->level - 1, 1, &refs,
7905                                                &flags);
7906                 /* We don't care about errors in readahead. */
7907                 if (ret < 0)
7908                         continue;
7909                 BUG_ON(refs == 0);
7910
7911                 if (wc->stage == DROP_REFERENCE) {
7912                         if (refs == 1)
7913                                 goto reada;
7914
7915                         if (wc->level == 1 &&
7916                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7917                                 continue;
7918                         if (!wc->update_ref ||
7919                             generation <= root->root_key.offset)
7920                                 continue;
7921                         btrfs_node_key_to_cpu(eb, &key, slot);
7922                         ret = btrfs_comp_cpu_keys(&key,
7923                                                   &wc->update_progress);
7924                         if (ret < 0)
7925                                 continue;
7926                 } else {
7927                         if (wc->level == 1 &&
7928                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7929                                 continue;
7930                 }
7931 reada:
7932                 readahead_tree_block(root, bytenr);
7933                 nread++;
7934         }
7935         wc->reada_slot = slot;
7936 }
7937
7938 /*
7939  * TODO: Modify the related functions to add the affected node/leaf to
7940  * dirty_extent_root for later qgroup accounting.
7941  *
7942  * Currently, this function does nothing.
7943  */
7944 static int account_leaf_items(struct btrfs_trans_handle *trans,
7945                               struct btrfs_root *root,
7946                               struct extent_buffer *eb)
7947 {
7948         int nr = btrfs_header_nritems(eb);
7949         int i, extent_type;
7950         struct btrfs_key key;
7951         struct btrfs_file_extent_item *fi;
7952         u64 bytenr, num_bytes;
7953
7954         for (i = 0; i < nr; i++) {
7955                 btrfs_item_key_to_cpu(eb, &key, i);
7956
7957                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7958                         continue;
7959
7960                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7961                 /* filter out non-qgroup-accountable extents */
7962                 extent_type = btrfs_file_extent_type(eb, fi);
7963
7964                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7965                         continue;
7966
7967                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7968                 if (!bytenr)
7969                         continue;
7970
7971                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7972         }
7973         return 0;
7974 }
7975
7976 /*
7977  * Walk up the tree from the bottom, freeing leaves and any interior
7978  * nodes which have had all slots visited. If a node (leaf or
7979  * interior) is freed, the node above it will have its slot
7980  * incremented. The root node will never be freed.
7981  *
7982  * At the end of this function, we should have a path which has all
7983  * slots incremented to the next position for a search. If we need to
7984  * read a new node it will be NULL and the node above it will have the
7985  * correct slot selected for a later read.
7986  *
7987  * If we increment the root node's slot counter past the number of
7988  * elements, 1 is returned to signal completion of the search.
7989  */
7990 static int adjust_slots_upwards(struct btrfs_root *root,
7991                                 struct btrfs_path *path, int root_level)
7992 {
7993         int level = 0;
7994         int nr, slot;
7995         struct extent_buffer *eb;
7996
7997         if (root_level == 0)
7998                 return 1;
7999
8000         while (level <= root_level) {
8001                 eb = path->nodes[level];
8002                 nr = btrfs_header_nritems(eb);
8003                 path->slots[level]++;
8004                 slot = path->slots[level];
8005                 if (slot >= nr || level == 0) {
8006                         /*
8007                          * Don't free the root - we will detect this
8008                          * condition after our loop and return a
8009                          * positive value for the caller to stop walking the tree.
8010                          */
8011                         if (level != root_level) {
8012                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8013                                 path->locks[level] = 0;
8014
8015                                 free_extent_buffer(eb);
8016                                 path->nodes[level] = NULL;
8017                                 path->slots[level] = 0;
8018                         }
8019                 } else {
8020                         /*
8021                          * We have a valid slot to walk back down
8022                          * from. Stop here so caller can process these
8023                          * new nodes.
8024                          */
8025                         break;
8026                 }
8027
8028                 level++;
8029         }
8030
8031         eb = path->nodes[root_level];
8032         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8033                 return 1;
8034
8035         return 0;
8036 }
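
/*
 * Example walk with assumed slot counts and a root at level 2: if the
 * level-0 leaf is exhausted and the level-1 node holds 4 items with
 * path->slots[1] == 3, the loop frees the leaf, bumps path->slots[1]
 * to 4, finds that past the end too and frees the level-1 node as
 * well, then bumps path->slots[2]; if that slot is still valid the
 * loop stops there and the caller reads the next level-1 node from it
 * on the way back down.
 */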
8037
8038 /*
8039  * root_eb is the subtree root and is locked before this function is called.
8040  * TODO: Modify this function to mark all nodes (including completely
8041  * shared ones) in dirty_extent_root so they get accounted in qgroup.
8042  */
8043 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8044                                   struct btrfs_root *root,
8045                                   struct extent_buffer *root_eb,
8046                                   u64 root_gen,
8047                                   int root_level)
8048 {
8049         int ret = 0;
8050         int level;
8051         struct extent_buffer *eb = root_eb;
8052         struct btrfs_path *path = NULL;
8053
8054         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
8055         BUG_ON(root_eb == NULL);
8056
8057         if (!root->fs_info->quota_enabled)
8058                 return 0;
8059
8060         if (!extent_buffer_uptodate(root_eb)) {
8061                 ret = btrfs_read_buffer(root_eb, root_gen);
8062                 if (ret)
8063                         goto out;
8064         }
8065
8066         if (root_level == 0) {
8067                 ret = account_leaf_items(trans, root, root_eb);
8068                 goto out;
8069         }
8070
8071         path = btrfs_alloc_path();
8072         if (!path)
8073                 return -ENOMEM;
8074
8075         /*
8076          * Walk down the tree.  Missing extent blocks are filled in as
8077          * we go. Metadata is accounted every time we read a new
8078          * extent block.
8079          *
8080          * When we reach a leaf, we account for file extent items in it,
8081          * walk back up the tree (adjusting slot pointers as we go)
8082          * and restart the search process.
8083          */
8084         extent_buffer_get(root_eb); /* For path */
8085         path->nodes[root_level] = root_eb;
8086         path->slots[root_level] = 0;
8087         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8088 walk_down:
8089         level = root_level;
8090         while (level >= 0) {
8091                 if (path->nodes[level] == NULL) {
8092                         int parent_slot;
8093                         u64 child_gen;
8094                         u64 child_bytenr;
8095
8096                         /* We need to get child blockptr/gen from
8097                          * parent before we can read it. */
8098                         eb = path->nodes[level + 1];
8099                         parent_slot = path->slots[level + 1];
8100                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8101                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8102
8103                         eb = read_tree_block(root, child_bytenr, child_gen);
8104                         if (IS_ERR(eb)) {
8105                                 ret = PTR_ERR(eb);
8106                                 goto out;
8107                         } else if (!extent_buffer_uptodate(eb)) {
8108                                 free_extent_buffer(eb);
8109                                 ret = -EIO;
8110                                 goto out;
8111                         }
8112
8113                         path->nodes[level] = eb;
8114                         path->slots[level] = 0;
8115
8116                         btrfs_tree_read_lock(eb);
8117                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8118                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8119                 }
8120
8121                 if (level == 0) {
8122                         ret = account_leaf_items(trans, root, path->nodes[level]);
8123                         if (ret)
8124                                 goto out;
8125
8126                         /* Nonzero return here means we completed our search */
8127                         ret = adjust_slots_upwards(root, path, root_level);
8128                         if (ret)
8129                                 break;
8130
8131                         /* Restart search with new slots */
8132                         goto walk_down;
8133                 }
8134
8135                 level--;
8136         }
8137
8138         ret = 0;
8139 out:
8140         btrfs_free_path(path);
8141
8142         return ret;
8143 }
8144
8145 /*
8146  * helper to process tree block while walking down the tree.
8147  *
8148  * when wc->stage == UPDATE_BACKREF, this function updates
8149  * back refs for pointers in the block.
8150  *
8151  * NOTE: return value 1 means we should stop walking down.
8152  */
8153 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8154                                    struct btrfs_root *root,
8155                                    struct btrfs_path *path,
8156                                    struct walk_control *wc, int lookup_info)
8157 {
8158         int level = wc->level;
8159         struct extent_buffer *eb = path->nodes[level];
8160         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8161         int ret;
8162
8163         if (wc->stage == UPDATE_BACKREF &&
8164             btrfs_header_owner(eb) != root->root_key.objectid)
8165                 return 1;
8166
8167         /*
8168          * when reference count of tree block is 1, it won't increase
8169          * again. once full backref flag is set, we never clear it.
8170          */
8171         if (lookup_info &&
8172             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8173              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8174                 BUG_ON(!path->locks[level]);
8175                 ret = btrfs_lookup_extent_info(trans, root,
8176                                                eb->start, level, 1,
8177                                                &wc->refs[level],
8178                                                &wc->flags[level]);
8179                 BUG_ON(ret == -ENOMEM);
8180                 if (ret)
8181                         return ret;
8182                 BUG_ON(wc->refs[level] == 0);
8183         }
8184
8185         if (wc->stage == DROP_REFERENCE) {
8186                 if (wc->refs[level] > 1)
8187                         return 1;
8188
8189                 if (path->locks[level] && !wc->keep_locks) {
8190                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8191                         path->locks[level] = 0;
8192                 }
8193                 return 0;
8194         }
8195
8196         /* wc->stage == UPDATE_BACKREF */
8197         if (!(wc->flags[level] & flag)) {
8198                 BUG_ON(!path->locks[level]);
8199                 ret = btrfs_inc_ref(trans, root, eb, 1);
8200                 BUG_ON(ret); /* -ENOMEM */
8201                 ret = btrfs_dec_ref(trans, root, eb, 0);
8202                 BUG_ON(ret); /* -ENOMEM */
8203                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8204                                                   eb->len, flag,
8205                                                   btrfs_header_level(eb), 0);
8206                 BUG_ON(ret); /* -ENOMEM */
8207                 wc->flags[level] |= flag;
8208         }
8209
8210         /*
8211          * the block is shared by multiple trees, so it's not good to
8212          * keep the tree lock
8213          */
8214         if (path->locks[level] && level > 0) {
8215                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8216                 path->locks[level] = 0;
8217         }
8218         return 0;
8219 }
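
/*
 * A sketch of what the UPDATE_BACKREF branch above does for a shared
 * block (the extent is hypothetical): btrfs_inc_ref(..., 1) adds, for
 * every extent referenced from the block, a shared backref naming this
 * block as parent, while btrfs_dec_ref(..., 0) drops the corresponding
 * refs keyed by root objectid.  Once btrfs_set_disk_extent_flags()
 * stamps BTRFS_BLOCK_FLAG_FULL_BACKREF on the extent item, the
 * conversion is permanent; the flag is never cleared again.
 */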
8220
8221 /*
8222  * helper to process tree block pointer.
8223  *
8224  * when wc->stage == DROP_REFERENCE, this function checks
8225  * reference count of the block pointed to. if the block
8226  * is shared and we need update back refs for the subtree
8227  * rooted at the block, this function changes wc->stage to
8228  * UPDATE_BACKREF. if the block is shared and there is no
8229  * need to update backrefs, this function drops the reference
8230  * to the block.
8231  *
8232  * NOTE: return value 1 means we should stop walking down.
8233  */
8234 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8235                                  struct btrfs_root *root,
8236                                  struct btrfs_path *path,
8237                                  struct walk_control *wc, int *lookup_info)
8238 {
8239         u64 bytenr;
8240         u64 generation;
8241         u64 parent;
8242         u32 blocksize;
8243         struct btrfs_key key;
8244         struct extent_buffer *next;
8245         int level = wc->level;
8246         int reada = 0;
8247         int ret = 0;
8248         bool need_account = false;
8249
8250         generation = btrfs_node_ptr_generation(path->nodes[level],
8251                                                path->slots[level]);
8252         /*
8253          * if the lower level block was created before the snapshot
8254          * was created, we know there is no need to update back refs
8255          * for the subtree
8256          */
8257         if (wc->stage == UPDATE_BACKREF &&
8258             generation <= root->root_key.offset) {
8259                 *lookup_info = 1;
8260                 return 1;
8261         }
8262
8263         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8264         blocksize = root->nodesize;
8265
8266         next = btrfs_find_tree_block(root->fs_info, bytenr);
8267         if (!next) {
8268                 next = btrfs_find_create_tree_block(root, bytenr);
8269                 if (!next)
8270                         return -ENOMEM;
8271                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8272                                                level - 1);
8273                 reada = 1;
8274         }
8275         btrfs_tree_lock(next);
8276         btrfs_set_lock_blocking(next);
8277
8278         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8279                                        &wc->refs[level - 1],
8280                                        &wc->flags[level - 1]);
8281         if (ret < 0) {
8282                 btrfs_tree_unlock(next);
8283                 return ret;
8284         }
8285
8286         if (unlikely(wc->refs[level - 1] == 0)) {
8287                 btrfs_err(root->fs_info, "Missing references.");
8288                 BUG();
8289         }
8290         *lookup_info = 0;
8291
8292         if (wc->stage == DROP_REFERENCE) {
8293                 if (wc->refs[level - 1] > 1) {
8294                         need_account = true;
8295                         if (level == 1 &&
8296                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8297                                 goto skip;
8298
8299                         if (!wc->update_ref ||
8300                             generation <= root->root_key.offset)
8301                                 goto skip;
8302
8303                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8304                                               path->slots[level]);
8305                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8306                         if (ret < 0)
8307                                 goto skip;
8308
8309                         wc->stage = UPDATE_BACKREF;
8310                         wc->shared_level = level - 1;
8311                 }
8312         } else {
8313                 if (level == 1 &&
8314                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8315                         goto skip;
8316         }
8317
8318         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8319                 btrfs_tree_unlock(next);
8320                 free_extent_buffer(next);
8321                 next = NULL;
8322                 *lookup_info = 1;
8323         }
8324
8325         if (!next) {
8326                 if (reada && level == 1)
8327                         reada_walk_down(trans, root, wc, path);
8328                 next = read_tree_block(root, bytenr, generation);
8329                 if (IS_ERR(next)) {
8330                         return PTR_ERR(next);
8331                 } else if (!extent_buffer_uptodate(next)) {
8332                         free_extent_buffer(next);
8333                         return -EIO;
8334                 }
8335                 btrfs_tree_lock(next);
8336                 btrfs_set_lock_blocking(next);
8337         }
8338
8339         level--;
8340         BUG_ON(level != btrfs_header_level(next));
8341         path->nodes[level] = next;
8342         path->slots[level] = 0;
8343         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8344         wc->level = level;
8345         if (wc->level == 1)
8346                 wc->reada_slot = 0;
8347         return 0;
8348 skip:
8349         wc->refs[level - 1] = 0;
8350         wc->flags[level - 1] = 0;
8351         if (wc->stage == DROP_REFERENCE) {
8352                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8353                         parent = path->nodes[level]->start;
8354                 } else {
8355                         BUG_ON(root->root_key.objectid !=
8356                                btrfs_header_owner(path->nodes[level]));
8357                         parent = 0;
8358                 }
8359
8360                 if (need_account) {
8361                         ret = account_shared_subtree(trans, root, next,
8362                                                      generation, level - 1);
8363                         if (ret) {
8364                                 btrfs_err_rl(root->fs_info,
8365                                         "Error "
8366                                         "%d accounting shared subtree. Quota "
8367                                         "is out of sync, rescan required.",
8368                                         ret);
8369                         }
8370                 }
8371                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8372                                 root->root_key.objectid, level - 1, 0, 0);
8373                 BUG_ON(ret); /* -ENOMEM */
8374         }
8375         btrfs_tree_unlock(next);
8376         free_extent_buffer(next);
8377         *lookup_info = 1;
8378         return 1;
8379 }
8380
8381 /*
8382  * helper to process tree block while walking up the tree.
8383  *
8384  * when wc->stage == DROP_REFERENCE, this function drops
8385  * reference count on the block.
8386  *
8387  * when wc->stage == UPDATE_BACKREF, this function changes
8388  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8389  * to UPDATE_BACKREF previously while processing the block.
8390  *
8391  * NOTE: return value 1 means we should stop walking up.
8392  */
8393 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8394                                  struct btrfs_root *root,
8395                                  struct btrfs_path *path,
8396                                  struct walk_control *wc)
8397 {
8398         int ret;
8399         int level = wc->level;
8400         struct extent_buffer *eb = path->nodes[level];
8401         u64 parent = 0;
8402
8403         if (wc->stage == UPDATE_BACKREF) {
8404                 BUG_ON(wc->shared_level < level);
8405                 if (level < wc->shared_level)
8406                         goto out;
8407
8408                 ret = find_next_key(path, level + 1, &wc->update_progress);
8409                 if (ret > 0)
8410                         wc->update_ref = 0;
8411
8412                 wc->stage = DROP_REFERENCE;
8413                 wc->shared_level = -1;
8414                 path->slots[level] = 0;
8415
8416                 /*
8417                  * check reference count again if the block isn't locked.
8418                  * we should start walking down the tree again if reference
8419                  * count is one.
8420                  */
8421                 if (!path->locks[level]) {
8422                         BUG_ON(level == 0);
8423                         btrfs_tree_lock(eb);
8424                         btrfs_set_lock_blocking(eb);
8425                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8426
8427                         ret = btrfs_lookup_extent_info(trans, root,
8428                                                        eb->start, level, 1,
8429                                                        &wc->refs[level],
8430                                                        &wc->flags[level]);
8431                         if (ret < 0) {
8432                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8433                                 path->locks[level] = 0;
8434                                 return ret;
8435                         }
8436                         BUG_ON(wc->refs[level] == 0);
8437                         if (wc->refs[level] == 1) {
8438                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8439                                 path->locks[level] = 0;
8440                                 return 1;
8441                         }
8442                 }
8443         }
8444
8445         /* wc->stage == DROP_REFERENCE */
8446         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8447
8448         if (wc->refs[level] == 1) {
8449                 if (level == 0) {
8450                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8451                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8452                         else
8453                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8454                         BUG_ON(ret); /* -ENOMEM */
8455                         ret = account_leaf_items(trans, root, eb);
8456                         if (ret) {
8457                                 btrfs_err_rl(root->fs_info,
8458                                         "error "
8459                                         "%d accounting leaf items. Quota "
8460                                         "is out of sync, rescan required.",
8461                                         ret);
8462                         }
8463                 }
8464                 /* make block locked assertion in clean_tree_block happy */
8465                 if (!path->locks[level] &&
8466                     btrfs_header_generation(eb) == trans->transid) {
8467                         btrfs_tree_lock(eb);
8468                         btrfs_set_lock_blocking(eb);
8469                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8470                 }
8471                 clean_tree_block(trans, root->fs_info, eb);
8472         }
8473
8474         if (eb == root->node) {
8475                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8476                         parent = eb->start;
8477                 else
8478                         BUG_ON(root->root_key.objectid !=
8479                                btrfs_header_owner(eb));
8480         } else {
8481                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8482                         parent = path->nodes[level + 1]->start;
8483                 else
8484                         BUG_ON(root->root_key.objectid !=
8485                                btrfs_header_owner(path->nodes[level + 1]));
8486         }
8487
8488         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8489 out:
8490         wc->refs[level] = 0;
8491         wc->flags[level] = 0;
8492         return 0;
8493 }
8494
8495 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8496                                    struct btrfs_root *root,
8497                                    struct btrfs_path *path,
8498                                    struct walk_control *wc)
8499 {
8500         int level = wc->level;
8501         int lookup_info = 1;
8502         int ret;
8503
8504         while (level >= 0) {
8505                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8506                 if (ret > 0)
8507                         break;
8508
8509                 if (level == 0)
8510                         break;
8511
8512                 if (path->slots[level] >=
8513                     btrfs_header_nritems(path->nodes[level]))
8514                         break;
8515
8516                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8517                 if (ret > 0) {
8518                         path->slots[level]++;
8519                         continue;
8520                 } else if (ret < 0)
8521                         return ret;
8522                 level = wc->level;
8523         }
8524         return 0;
8525 }
8526
8527 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8528                                  struct btrfs_root *root,
8529                                  struct btrfs_path *path,
8530                                  struct walk_control *wc, int max_level)
8531 {
8532         int level = wc->level;
8533         int ret;
8534
8535         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8536         while (level < max_level && path->nodes[level]) {
8537                 wc->level = level;
8538                 if (path->slots[level] + 1 <
8539                     btrfs_header_nritems(path->nodes[level])) {
8540                         path->slots[level]++;
8541                         return 0;
8542                 } else {
8543                         ret = walk_up_proc(trans, root, path, wc);
8544                         if (ret > 0)
8545                                 return 0;
8546
8547                         if (path->locks[level]) {
8548                                 btrfs_tree_unlock_rw(path->nodes[level],
8549                                                      path->locks[level]);
8550                                 path->locks[level] = 0;
8551                         }
8552                         free_extent_buffer(path->nodes[level]);
8553                         path->nodes[level] = NULL;
8554                         level++;
8555                 }
8556         }
8557         return 1;
8558 }
8559
8560 /*
8561  * drop a subvolume tree.
8562  *
8563  * this function traverses the tree, freeing any blocks that are only
8564  * referenced by the tree.
8565  *
8566  * when a shared tree block is found, this function decreases its
8567  * reference count by one. if update_ref is true, this function
8568  * also makes sure backrefs for the shared block and all lower level
8569  * blocks are properly updated.
8570  *
8571  * If called with for_reloc == 0, may exit early with -EAGAIN
8572  */
8573 int btrfs_drop_snapshot(struct btrfs_root *root,
8574                          struct btrfs_block_rsv *block_rsv, int update_ref,
8575                          int for_reloc)
8576 {
8577         struct btrfs_path *path;
8578         struct btrfs_trans_handle *trans;
8579         struct btrfs_root *tree_root = root->fs_info->tree_root;
8580         struct btrfs_root_item *root_item = &root->root_item;
8581         struct walk_control *wc;
8582         struct btrfs_key key;
8583         int err = 0;
8584         int ret;
8585         int level;
8586         bool root_dropped = false;
8587
8588         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8589
8590         path = btrfs_alloc_path();
8591         if (!path) {
8592                 err = -ENOMEM;
8593                 goto out;
8594         }
8595
8596         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8597         if (!wc) {
8598                 btrfs_free_path(path);
8599                 err = -ENOMEM;
8600                 goto out;
8601         }
8602
8603         trans = btrfs_start_transaction(tree_root, 0);
8604         if (IS_ERR(trans)) {
8605                 err = PTR_ERR(trans);
8606                 goto out_free;
8607         }
8608
8609         if (block_rsv)
8610                 trans->block_rsv = block_rsv;
8611
8612         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8613                 level = btrfs_header_level(root->node);
8614                 path->nodes[level] = btrfs_lock_root_node(root);
8615                 btrfs_set_lock_blocking(path->nodes[level]);
8616                 path->slots[level] = 0;
8617                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8618                 memset(&wc->update_progress, 0,
8619                        sizeof(wc->update_progress));
8620         } else {
8621                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8622                 memcpy(&wc->update_progress, &key,
8623                        sizeof(wc->update_progress));
8624
8625                 level = root_item->drop_level;
8626                 BUG_ON(level == 0);
8627                 path->lowest_level = level;
8628                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8629                 path->lowest_level = 0;
8630                 if (ret < 0) {
8631                         err = ret;
8632                         goto out_end_trans;
8633                 }
8634                 WARN_ON(ret > 0);
8635
8636                 /*
8637                  * unlock our path, this is safe because only this
8638                  * function is allowed to delete this snapshot
8639                  */
8640                 btrfs_unlock_up_safe(path, 0);
8641
8642                 level = btrfs_header_level(root->node);
8643                 while (1) {
8644                         btrfs_tree_lock(path->nodes[level]);
8645                         btrfs_set_lock_blocking(path->nodes[level]);
8646                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8647
8648                         ret = btrfs_lookup_extent_info(trans, root,
8649                                                 path->nodes[level]->start,
8650                                                 level, 1, &wc->refs[level],
8651                                                 &wc->flags[level]);
8652                         if (ret < 0) {
8653                                 err = ret;
8654                                 goto out_end_trans;
8655                         }
8656                         BUG_ON(wc->refs[level] == 0);
8657
8658                         if (level == root_item->drop_level)
8659                                 break;
8660
8661                         btrfs_tree_unlock(path->nodes[level]);
8662                         path->locks[level] = 0;
8663                         WARN_ON(wc->refs[level] != 1);
8664                         level--;
8665                 }
8666         }
8667
8668         wc->level = level;
8669         wc->shared_level = -1;
8670         wc->stage = DROP_REFERENCE;
8671         wc->update_ref = update_ref;
8672         wc->keep_locks = 0;
8673         wc->for_reloc = for_reloc;
8674         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8675
8676         while (1) {
8677
8678                 ret = walk_down_tree(trans, root, path, wc);
8679                 if (ret < 0) {
8680                         err = ret;
8681                         break;
8682                 }
8683
8684                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8685                 if (ret < 0) {
8686                         err = ret;
8687                         break;
8688                 }
8689
8690                 if (ret > 0) {
8691                         BUG_ON(wc->stage != DROP_REFERENCE);
8692                         break;
8693                 }
8694
8695                 if (wc->stage == DROP_REFERENCE) {
8696                         level = wc->level;
8697                         btrfs_node_key(path->nodes[level],
8698                                        &root_item->drop_progress,
8699                                        path->slots[level]);
8700                         root_item->drop_level = level;
8701                 }
8702
8703                 BUG_ON(wc->level == 0);
8704                 if (btrfs_should_end_transaction(trans, tree_root) ||
8705                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8706                         ret = btrfs_update_root(trans, tree_root,
8707                                                 &root->root_key,
8708                                                 root_item);
8709                         if (ret) {
8710                                 btrfs_abort_transaction(trans, tree_root, ret);
8711                                 err = ret;
8712                                 goto out_end_trans;
8713                         }
8714
8715                         btrfs_end_transaction_throttle(trans, tree_root);
8716                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8717                                 pr_debug("BTRFS: drop snapshot early exit\n");
8718                                 err = -EAGAIN;
8719                                 goto out_free;
8720                         }
8721
8722                         trans = btrfs_start_transaction(tree_root, 0);
8723                         if (IS_ERR(trans)) {
8724                                 err = PTR_ERR(trans);
8725                                 goto out_free;
8726                         }
8727                         if (block_rsv)
8728                                 trans->block_rsv = block_rsv;
8729                 }
8730         }
8731         btrfs_release_path(path);
8732         if (err)
8733                 goto out_end_trans;
8734
8735         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8736         if (ret) {
8737                 btrfs_abort_transaction(trans, tree_root, ret);
8738                 goto out_end_trans;
8739         }
8740
8741         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8742                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8743                                       NULL, NULL);
8744                 if (ret < 0) {
8745                         btrfs_abort_transaction(trans, tree_root, ret);
8746                         err = ret;
8747                         goto out_end_trans;
8748                 } else if (ret > 0) {
8749                         /* if we fail to delete the orphan item this time
8750                          * around, it'll get picked up the next time.
8751                          *
8752                          * The most common failure here is just -ENOENT.
8753                          */
8754                         btrfs_del_orphan_item(trans, tree_root,
8755                                               root->root_key.objectid);
8756                 }
8757         }
8758
8759         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8760                 btrfs_add_dropped_root(trans, root);
8761         } else {
8762                 free_extent_buffer(root->node);
8763                 free_extent_buffer(root->commit_root);
8764                 btrfs_put_fs_root(root);
8765         }
8766         root_dropped = true;
8767 out_end_trans:
8768         btrfs_end_transaction_throttle(trans, tree_root);
8769 out_free:
8770         kfree(wc);
8771         btrfs_free_path(path);
8772 out:
8773         /*
8774          * So if we need to stop dropping the snapshot for whatever reason we
8775          * need to make sure to add it back to the dead root list so that we
8776          * keep trying to do the work later.  This also cleans up roots that we
8777          * don't have in the radix (like when we recover after a power fail
8778          * or unmount) so we don't leak memory.
8779          */
8780         if (!for_reloc && root_dropped == false)
8781                 btrfs_add_dead_root(root);
8782         if (err && err != -EAGAIN)
8783                 btrfs_std_error(root->fs_info, err, NULL);
8784         return err;
8785 }
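
/*
 * Resume sketch (the crash point and key are hypothetical): if a prior
 * transaction committed with drop_progress == (257 EXTENT_DATA 0) and
 * drop_level == 1, the next invocation re-searches that key with
 * path->lowest_level == 1, re-locks the nodes above it and verifies
 * their refs, then carries on dropping from exactly that point, so a
 * snapshot delete survives a remount or power loss.
 */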
8786
8787 /*
8788  * drop subtree rooted at tree block 'node'.
8789  *
8790  * NOTE: this function will unlock and release tree block 'node'.
8791  * Only used by relocation code.
8792  */
8793 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8794                         struct btrfs_root *root,
8795                         struct extent_buffer *node,
8796                         struct extent_buffer *parent)
8797 {
8798         struct btrfs_path *path;
8799         struct walk_control *wc;
8800         int level;
8801         int parent_level;
8802         int ret = 0;
8803         int wret;
8804
8805         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8806
8807         path = btrfs_alloc_path();
8808         if (!path)
8809                 return -ENOMEM;
8810
8811         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8812         if (!wc) {
8813                 btrfs_free_path(path);
8814                 return -ENOMEM;
8815         }
8816
8817         btrfs_assert_tree_locked(parent);
8818         parent_level = btrfs_header_level(parent);
8819         extent_buffer_get(parent);
8820         path->nodes[parent_level] = parent;
8821         path->slots[parent_level] = btrfs_header_nritems(parent);
8822
8823         btrfs_assert_tree_locked(node);
8824         level = btrfs_header_level(node);
8825         path->nodes[level] = node;
8826         path->slots[level] = 0;
8827         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8828
8829         wc->refs[parent_level] = 1;
8830         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8831         wc->level = level;
8832         wc->shared_level = -1;
8833         wc->stage = DROP_REFERENCE;
8834         wc->update_ref = 0;
8835         wc->keep_locks = 1;
8836         wc->for_reloc = 1;
8837         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8838
8839         while (1) {
8840                 wret = walk_down_tree(trans, root, path, wc);
8841                 if (wret < 0) {
8842                         ret = wret;
8843                         break;
8844                 }
8845
8846                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8847                 if (wret < 0)
8848                         ret = wret;
8849                 if (wret != 0)
8850                         break;
8851         }
8852
8853         kfree(wc);
8854         btrfs_free_path(path);
8855         return ret;
8856 }
8857
8858 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8859 {
8860         u64 num_devices;
8861         u64 stripped;
8862
8863         /*
8864  * if restripe for this chunk_type is on, pick the target profile and
8865  * return; otherwise do the usual balance
8866          */
8867         stripped = get_restripe_target(root->fs_info, flags);
8868         if (stripped)
8869                 return extended_to_chunk(stripped);
8870
8871         num_devices = root->fs_info->fs_devices->rw_devices;
8872
8873         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8874                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8875                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8876
8877         if (num_devices == 1) {
8878                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8879                 stripped = flags & ~stripped;
8880
8881                 /* turn raid0 into single device chunks */
8882                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8883                         return stripped;
8884
8885                 /* turn mirroring into duplication */
8886                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8887                              BTRFS_BLOCK_GROUP_RAID10))
8888                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8889         } else {
8890                 /* they already had raid on here, just return */
8891                 if (flags & stripped)
8892                         return flags;
8893
8894                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8895                 stripped = flags & ~stripped;
8896
8897                 /* switch duplicated blocks with raid1 */
8898                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8899                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8900
8901                 /* this is drive concat, leave it alone */
8902         }
8903
8904         return flags;
8905 }
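
/*
 * Two illustrative conversions from the logic above (profiles
 * assumed): on a filesystem left with a single rw device, RAID1 or
 * RAID10 chunks come back with BTRFS_BLOCK_GROUP_DUP substituted for
 * the mirroring bits and RAID0 collapses to single; with several
 * devices available, DUP chunks are promoted to RAID1 instead, and
 * chunks that already carry a raid bit are returned unchanged.
 */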
8906
8907 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8908 {
8909         struct btrfs_space_info *sinfo = cache->space_info;
8910         u64 num_bytes;
8911         u64 min_allocable_bytes;
8912         int ret = -ENOSPC;
8913
8914         /*
8915  * We need some metadata space and system metadata space for
8916  * allocating chunks in some corner cases, so keep a minimum
8917  * reserve unless we are being forced to set the group read-only.
8918          */
8919         if ((sinfo->flags &
8920              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8921             !force)
8922                 min_allocable_bytes = 1 * 1024 * 1024;
8923         else
8924                 min_allocable_bytes = 0;
8925
8926         spin_lock(&sinfo->lock);
8927         spin_lock(&cache->lock);
8928
8929         if (cache->ro) {
8930                 cache->ro++;
8931                 ret = 0;
8932                 goto out;
8933         }
8934
8935         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8936                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8937
8938         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8939             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8940             min_allocable_bytes <= sinfo->total_bytes) {
8941                 sinfo->bytes_readonly += num_bytes;
8942                 cache->ro++;
8943                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8944                 ret = 0;
8945         }
8946 out:
8947         spin_unlock(&cache->lock);
8948         spin_unlock(&sinfo->lock);
8949         return ret;
8950 }
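
/*
 * Numeric sketch of the check above (sizes assumed, bytes_super
 * ignored): a 1 GiB metadata block group with 600 MiB used and nothing
 * reserved or pinned would add num_bytes == 424 MiB to bytes_readonly.
 * Marking it ro succeeds only if total_bytes still covers that on top
 * of everything already used, reserved, pinned, may_use and read-only,
 * plus the 1 MiB min_allocable_bytes cushion kept in the non-forced
 * metadata/system case.
 */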
8951
8952 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8953                              struct btrfs_block_group_cache *cache)
8955 {
8956         struct btrfs_trans_handle *trans;
8957         u64 alloc_flags;
8958         int ret;
8959
8960 again:
8961         trans = btrfs_join_transaction(root);
8962         if (IS_ERR(trans))
8963                 return PTR_ERR(trans);
8964
8965         /*
8966          * we're not allowed to set block groups read-only after the dirty
8967          * block groups cache has started writing.  If it has already
8968          * started, back off and let this transaction commit
8969          */
8970         mutex_lock(&root->fs_info->ro_block_group_mutex);
8971         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
8972                 u64 transid = trans->transid;
8973
8974                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8975                 btrfs_end_transaction(trans, root);
8976
8977                 ret = btrfs_wait_for_commit(root, transid);
8978                 if (ret)
8979                         return ret;
8980                 goto again;
8981         }
8982
8983         /*
8984          * if we are changing raid levels, try to allocate a corresponding
8985          * block group with the new raid level.
8986          */
8987         alloc_flags = update_block_group_flags(root, cache->flags);
8988         if (alloc_flags != cache->flags) {
8989                 ret = do_chunk_alloc(trans, root, alloc_flags,
8990                                      CHUNK_ALLOC_FORCE);
8991                 /*
8992                  * ENOSPC is allowed here, we may have enough space
8993                  * already allocated at the new raid level to
8994                  * carry on
8995                  */
8996                 if (ret == -ENOSPC)
8997                         ret = 0;
8998                 if (ret < 0)
8999                         goto out;
9000         }
9001
9002         ret = inc_block_group_ro(cache, 0);
9003         if (!ret)
9004                 goto out;
9005         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9006         ret = do_chunk_alloc(trans, root, alloc_flags,
9007                              CHUNK_ALLOC_FORCE);
9008         if (ret < 0)
9009                 goto out;
9010         ret = inc_block_group_ro(cache, 0);
9011 out:
9012         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9013                 alloc_flags = update_block_group_flags(root, cache->flags);
9014                 lock_chunks(root->fs_info->chunk_root);
9015                 check_system_chunk(trans, root, alloc_flags);
9016                 unlock_chunks(root->fs_info->chunk_root);
9017         }
9018         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9019
9020         btrfs_end_transaction(trans, root);
9021         return ret;
9022 }
9023
9024 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9025                             struct btrfs_root *root, u64 type)
9026 {
9027         u64 alloc_flags = get_alloc_profile(root, type);
9028         return do_chunk_alloc(trans, root, alloc_flags,
9029                               CHUNK_ALLOC_FORCE);
9030 }
9031
9032 /*
9033  * helper to account the unused space of all the read-only block groups in
9034  * the space_info. takes mirrors into account.
9035  */
9036 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9037 {
9038         struct btrfs_block_group_cache *block_group;
9039         u64 free_bytes = 0;
9040         int factor;
9041
9042         /* It's df, we don't care if it's racy */
9043         if (list_empty(&sinfo->ro_bgs))
9044                 return 0;
9045
9046         spin_lock(&sinfo->lock);
9047         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9048                 spin_lock(&block_group->lock);
9049
9050                 if (!block_group->ro) {
9051                         spin_unlock(&block_group->lock);
9052                         continue;
9053                 }
9054
9055                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9056                                           BTRFS_BLOCK_GROUP_RAID10 |
9057                                           BTRFS_BLOCK_GROUP_DUP))
9058                         factor = 2;
9059                 else
9060                         factor = 1;
9061
9062                 free_bytes += (block_group->key.offset -
9063                                btrfs_block_group_used(&block_group->item)) *
9064                                factor;
9065
9066                 spin_unlock(&block_group->lock);
9067         }
9068         spin_unlock(&sinfo->lock);
9069
9070         return free_bytes;
9071 }
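
/*
 * Example with assumed numbers: a read-only RAID1 block group whose
 * key.offset is 1 GiB with 600 MiB recorded as used contributes
 * (1024 - 600) * 2 == 848 MiB here, since every logical byte free in
 * a mirrored group corresponds to two raw bytes on the devices.
 */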
9072
9073 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9074                               struct btrfs_block_group_cache *cache)
9075 {
9076         struct btrfs_space_info *sinfo = cache->space_info;
9077         u64 num_bytes;
9078
9079         BUG_ON(!cache->ro);
9080
9081         spin_lock(&sinfo->lock);
9082         spin_lock(&cache->lock);
9083         if (!--cache->ro) {
9084                 num_bytes = cache->key.offset - cache->reserved -
9085                             cache->pinned - cache->bytes_super -
9086                             btrfs_block_group_used(&cache->item);
9087                 sinfo->bytes_readonly -= num_bytes;
9088                 list_del_init(&cache->ro_list);
9089         }
9090         spin_unlock(&cache->lock);
9091         spin_unlock(&sinfo->lock);
9092 }
9093
9094 /*
9095  * checks to see if it's even possible to relocate this block group.
9096  *
9097  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9098  * ok to go ahead and try.
9099  */
9100 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9101 {
9102         struct btrfs_block_group_cache *block_group;
9103         struct btrfs_space_info *space_info;
9104         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9105         struct btrfs_device *device;
9106         struct btrfs_trans_handle *trans;
9107         u64 min_free;
9108         u64 dev_min = 1;
9109         u64 dev_nr = 0;
9110         u64 target;
9111         int index;
9112         int full = 0;
9113         int ret = 0;
9114
9115         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9116
9117         /* odd, couldn't find the block group, leave it alone */
9118         if (!block_group)
9119                 return -1;
9120
9121         min_free = btrfs_block_group_used(&block_group->item);
9122
9123         /* no bytes used, we're good */
9124         if (!min_free)
9125                 goto out;
9126
9127         space_info = block_group->space_info;
9128         spin_lock(&space_info->lock);
9129
9130         full = space_info->full;
9131
9132         /*
9133          * if this is the last block group we have in this space, we can't
9134          * relocate it unless we're able to allocate a new chunk below.
9135          *
9136          * Otherwise, we need to make sure we have room in the space to handle
9137          * all of the extents from this block group.  If we can, we're good
9138          */
9139         if ((space_info->total_bytes != block_group->key.offset) &&
9140             (space_info->bytes_used + space_info->bytes_reserved +
9141              space_info->bytes_pinned + space_info->bytes_readonly +
9142              min_free < space_info->total_bytes)) {
9143                 spin_unlock(&space_info->lock);
9144                 goto out;
9145         }
9146         spin_unlock(&space_info->lock);
9147
9148         /*
9149          * ok we don't have enough space, but maybe we have free space on our
9150          * devices to allocate new chunks for relocation, so loop through our
9151          * alloc devices and guess if we have enough space.  if this block
9152          * group is going to be restriped, run checks against the target
9153          * profile instead of the current one.
9154          */
9155         ret = -1;
9156
9157         /*
9158          * index:
9159          *      0: raid10
9160          *      1: raid1
9161          *      2: dup
9162          *      3: raid0
9163          *      4: single
9164          */
9165         target = get_restripe_target(root->fs_info, block_group->flags);
9166         if (target) {
9167                 index = __get_raid_index(extended_to_chunk(target));
9168         } else {
9169                 /*
9170                  * this is just a balance, so if we were marked as full
9171                  * we know there is no space for a new chunk
9172                  */
9173                 if (full)
9174                         goto out;
9175
9176                 index = get_block_group_index(block_group);
9177         }
9178
9179         if (index == BTRFS_RAID_RAID10) {
9180                 dev_min = 4;
9181                 /* Divide by 2 */
9182                 min_free >>= 1;
9183         } else if (index == BTRFS_RAID_RAID1) {
9184                 dev_min = 2;
9185         } else if (index == BTRFS_RAID_DUP) {
9186                 /* Multiply by 2 */
9187                 min_free <<= 1;
9188         } else if (index == BTRFS_RAID_RAID0) {
9189                 dev_min = fs_devices->rw_devices;
9190                 min_free = div64_u64(min_free, dev_min);
9191         }
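
        /*
         * Example of the adjustment above (values assumed): relocating a
         * RAID10 group with 800 MiB used halves min_free to 400 MiB but
         * requires dev_min == 4 devices to each offer a hole that big,
         * while a DUP group doubles min_free since both copies must fit
         * on a single device.
         */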
9192
9193         /* We need to do this so that we can look at pending chunks */
9194         trans = btrfs_join_transaction(root);
9195         if (IS_ERR(trans)) {
9196                 ret = PTR_ERR(trans);
9197                 goto out;
9198         }
9199
9200         mutex_lock(&root->fs_info->chunk_mutex);
9201         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9202                 u64 dev_offset;
9203
9204                 /*
9205                  * check to make sure we can actually find a chunk with enough
9206                  * space to fit our block group in.
9207                  */
9208                 if (device->total_bytes > device->bytes_used + min_free &&
9209                     !device->is_tgtdev_for_dev_replace) {
9210                         ret = find_free_dev_extent(trans, device, min_free,
9211                                                    &dev_offset, NULL);
9212                         if (!ret)
9213                                 dev_nr++;
9214
9215                         if (dev_nr >= dev_min)
9216                                 break;
9217
9218                         ret = -1;
9219                 }
9220         }
9221         mutex_unlock(&root->fs_info->chunk_mutex);
9222         btrfs_end_transaction(trans, root);
9223 out:
9224         btrfs_put_block_group(block_group);
9225         return ret;
9226 }
9227
9228 static int find_first_block_group(struct btrfs_root *root,
9229                 struct btrfs_path *path, struct btrfs_key *key)
9230 {
9231         int ret = 0;
9232         struct btrfs_key found_key;
9233         struct extent_buffer *leaf;
9234         int slot;
9235
9236         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9237         if (ret < 0)
9238                 goto out;
9239
9240         while (1) {
9241                 slot = path->slots[0];
9242                 leaf = path->nodes[0];
9243                 if (slot >= btrfs_header_nritems(leaf)) {
9244                         ret = btrfs_next_leaf(root, path);
9245                         if (ret == 0)
9246                                 continue;
9247                         if (ret < 0)
9248                                 goto out;
9249                         break;
9250                 }
9251                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9252
9253                 if (found_key.objectid >= key->objectid &&
9254                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9255                         ret = 0;
9256                         goto out;
9257                 }
9258                 path->slots[0]++;
9259         }
9260 out:
9261         return ret;
9262 }
9263
9264 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9265 {
9266         struct btrfs_block_group_cache *block_group;
9267         u64 last = 0;
9268
9269         while (1) {
9270                 struct inode *inode;
9271
9272                 block_group = btrfs_lookup_first_block_group(info, last);
9273                 while (block_group) {
9274                         spin_lock(&block_group->lock);
9275                         if (block_group->iref)
9276                                 break;
9277                         spin_unlock(&block_group->lock);
9278                         block_group = next_block_group(info->tree_root,
9279                                                        block_group);
9280                 }
9281                 if (!block_group) {
9282                         if (last == 0)
9283                                 break;
9284                         last = 0;
9285                         continue;
9286                 }
9287
9288                 inode = block_group->inode;
9289                 block_group->iref = 0;
9290                 block_group->inode = NULL;
9291                 spin_unlock(&block_group->lock);
9292                 iput(inode);
9293                 last = block_group->key.objectid + block_group->key.offset;
9294                 btrfs_put_block_group(block_group);
9295         }
9296 }
9297
9298 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9299 {
9300         struct btrfs_block_group_cache *block_group;
9301         struct btrfs_space_info *space_info;
9302         struct btrfs_caching_control *caching_ctl;
9303         struct rb_node *n;
9304
9305         down_write(&info->commit_root_sem);
9306         while (!list_empty(&info->caching_block_groups)) {
9307                 caching_ctl = list_entry(info->caching_block_groups.next,
9308                                          struct btrfs_caching_control, list);
9309                 list_del(&caching_ctl->list);
9310                 put_caching_control(caching_ctl);
9311         }
9312         up_write(&info->commit_root_sem);
9313
9314         spin_lock(&info->unused_bgs_lock);
9315         while (!list_empty(&info->unused_bgs)) {
9316                 block_group = list_first_entry(&info->unused_bgs,
9317                                                struct btrfs_block_group_cache,
9318                                                bg_list);
9319                 list_del_init(&block_group->bg_list);
9320                 btrfs_put_block_group(block_group);
9321         }
9322         spin_unlock(&info->unused_bgs_lock);
9323
9324         spin_lock(&info->block_group_cache_lock);
9325         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9326                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9327                                        cache_node);
9328                 rb_erase(&block_group->cache_node,
9329                          &info->block_group_cache_tree);
9330                 RB_CLEAR_NODE(&block_group->cache_node);
9331                 spin_unlock(&info->block_group_cache_lock);
9332
9333                 down_write(&block_group->space_info->groups_sem);
9334                 list_del(&block_group->list);
9335                 up_write(&block_group->space_info->groups_sem);
9336
9337                 if (block_group->cached == BTRFS_CACHE_STARTED)
9338                         wait_block_group_cache_done(block_group);
9339
9340                 /*
9341                  * We haven't cached this block group, which means we may
9342                  * have excluded extents on this block group.
9343                  */
9344                 if (block_group->cached == BTRFS_CACHE_NO ||
9345                     block_group->cached == BTRFS_CACHE_ERROR)
9346                         free_excluded_extents(info->extent_root, block_group);
9347
9348                 btrfs_remove_free_space_cache(block_group);
9349                 btrfs_put_block_group(block_group);
9350
9351                 spin_lock(&info->block_group_cache_lock);
9352         }
9353         spin_unlock(&info->block_group_cache_lock);
9354
9355         /* now that all the block groups are freed, go through and
9356          * free all the space_info structs.  This is only called during
9357          * the final stages of unmount, and so we know nobody is
9358          * using them.  We call synchronize_rcu() once before we start,
9359          * just to be on the safe side.
9360          */
9361         synchronize_rcu();
9362
9363         release_global_block_rsv(info);
9364
9365         while (!list_empty(&info->space_info)) {
9366                 int i;
9367
9368                 space_info = list_entry(info->space_info.next,
9369                                         struct btrfs_space_info,
9370                                         list);
9371                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9372                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9373                             space_info->bytes_reserved > 0 ||
9374                             space_info->bytes_may_use > 0)) {
9375                                 dump_space_info(space_info, 0, 0);
9376                         }
9377                 }
9378                 list_del(&space_info->list);
9379                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9380                         struct kobject *kobj;
9381                         kobj = space_info->block_group_kobjs[i];
9382                         space_info->block_group_kobjs[i] = NULL;
9383                         if (kobj) {
9384                                 kobject_del(kobj);
9385                                 kobject_put(kobj);
9386                         }
9387                 }
9388                 kobject_del(&space_info->kobj);
9389                 kobject_put(&space_info->kobj);
9390         }
9391         return 0;
9392 }
9393
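/*
 * Link a block group into its space_info's list for the group's raid index.
 * The first block group of a given raid type also gets a sysfs raid kobject
 * created under the space_info directory; a failure there is only logged,
 * not propagated.
 */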
9394 static void __link_block_group(struct btrfs_space_info *space_info,
9395                                struct btrfs_block_group_cache *cache)
9396 {
9397         int index = get_block_group_index(cache);
9398         bool first = false;
9399
9400         down_write(&space_info->groups_sem);
9401         if (list_empty(&space_info->block_groups[index]))
9402                 first = true;
9403         list_add_tail(&cache->list, &space_info->block_groups[index]);
9404         up_write(&space_info->groups_sem);
9405
9406         if (first) {
9407                 struct raid_kobject *rkobj;
9408                 int ret;
9409
9410                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9411                 if (!rkobj)
9412                         goto out_err;
9413                 rkobj->raid_type = index;
9414                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9415                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9416                                   "%s", get_raid_name(index));
9417                 if (ret) {
9418                         kobject_put(&rkobj->kobj);
9419                         goto out_err;
9420                 }
9421                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9422         }
9423
9424         return;
9425 out_err:
9426         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9427 }
9428
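/*
 * Allocate and initialize the in-memory descriptor for a block group that
 * starts at @start and spans @size bytes.  The caller owns the initial
 * reference and drops it with btrfs_put_block_group().  Returns NULL if
 * memory allocation fails.
 */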
9429 static struct btrfs_block_group_cache *
9430 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9431 {
9432         struct btrfs_block_group_cache *cache;
9433
9434         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9435         if (!cache)
9436                 return NULL;
9437
9438         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9439                                         GFP_NOFS);
9440         if (!cache->free_space_ctl) {
9441                 kfree(cache);
9442                 return NULL;
9443         }
9444
9445         cache->key.objectid = start;
9446         cache->key.offset = size;
9447         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9448
9449         cache->sectorsize = root->sectorsize;
9450         cache->fs_info = root->fs_info;
9451         cache->full_stripe_len = btrfs_full_stripe_len(root,
9452                                                &root->fs_info->mapping_tree,
9453                                                start);
9454         atomic_set(&cache->count, 1);
9455         spin_lock_init(&cache->lock);
9456         init_rwsem(&cache->data_rwsem);
9457         INIT_LIST_HEAD(&cache->list);
9458         INIT_LIST_HEAD(&cache->cluster_list);
9459         INIT_LIST_HEAD(&cache->bg_list);
9460         INIT_LIST_HEAD(&cache->ro_list);
9461         INIT_LIST_HEAD(&cache->dirty_list);
9462         INIT_LIST_HEAD(&cache->io_list);
9463         btrfs_init_free_space_ctl(cache);
9464         atomic_set(&cache->trimming, 0);
9465
9466         return cache;
9467 }
9468
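/*
 * Load every block group item from the extent tree at mount time: create
 * the in-memory cache for each, exclude the super stripes, short-circuit
 * the caching work for completely full or completely empty groups, hook
 * each group into its space_info and queue empty ones for removal.  If
 * mirrored profiles exist, un-mirrored (raid0/single) groups are made
 * read-only so allocations prefer the mirrored ones.
 */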
9469 int btrfs_read_block_groups(struct btrfs_root *root)
9470 {
9471         struct btrfs_path *path;
9472         int ret;
9473         struct btrfs_block_group_cache *cache;
9474         struct btrfs_fs_info *info = root->fs_info;
9475         struct btrfs_space_info *space_info;
9476         struct btrfs_key key;
9477         struct btrfs_key found_key;
9478         struct extent_buffer *leaf;
9479         int need_clear = 0;
9480         u64 cache_gen;
9481
9482         root = info->extent_root;
9483         key.objectid = 0;
9484         key.offset = 0;
9485         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9486         path = btrfs_alloc_path();
9487         if (!path)
9488                 return -ENOMEM;
9489         path->reada = 1;
9490
9491         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9492         if (btrfs_test_opt(root, SPACE_CACHE) &&
9493             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9494                 need_clear = 1;
9495         if (btrfs_test_opt(root, CLEAR_CACHE))
9496                 need_clear = 1;
9497
9498         while (1) {
9499                 ret = find_first_block_group(root, path, &key);
9500                 if (ret > 0)
9501                         break;
9502                 if (ret != 0)
9503                         goto error;
9504
9505                 leaf = path->nodes[0];
9506                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9507
9508                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9509                                                        found_key.offset);
9510                 if (!cache) {
9511                         ret = -ENOMEM;
9512                         goto error;
9513                 }
9514
9515                 if (need_clear) {
9516                         /*
9517                          * When we mount with an old space cache, we need to
9518                          * set BTRFS_DC_CLEAR and set the dirty flag.
9519                          *
9520                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9521                          *    truncate the old free space cache inode and
9522                          *    set up a new one.
9523                          * b) Setting the dirty flag makes sure that we flush
9524                          *    the new space cache info onto disk.
9525                          */
9526                         if (btrfs_test_opt(root, SPACE_CACHE))
9527                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9528                 }
9529
9530                 read_extent_buffer(leaf, &cache->item,
9531                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9532                                    sizeof(cache->item));
9533                 cache->flags = btrfs_block_group_flags(&cache->item);
9534
9535                 key.objectid = found_key.objectid + found_key.offset;
9536                 btrfs_release_path(path);
9537
9538                 /*
9539                  * We need to exclude the super stripes now so that the space
9540                  * info has super bytes accounted for, otherwise we'll think
9541                  * we have more space than we actually do.
9542                  */
9543                 ret = exclude_super_stripes(root, cache);
9544                 if (ret) {
9545                         /*
9546                          * We may have excluded something, so call this just in
9547                          * case.
9548                          */
9549                         free_excluded_extents(root, cache);
9550                         btrfs_put_block_group(cache);
9551                         goto error;
9552                 }
9553
9554                 /*
9555                  * Check for two cases: either we are full, and therefore
9556                  * don't need to bother with the caching work since we won't
9557                  * find any space, or we are empty, and we can just add all
9558                  * the space in and be done with it.  This saves us a lot of
9559                  * time, particularly in the full case.
9560                  */
9561                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9562                         cache->last_byte_to_unpin = (u64)-1;
9563                         cache->cached = BTRFS_CACHE_FINISHED;
9564                         free_excluded_extents(root, cache);
9565                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9566                         cache->last_byte_to_unpin = (u64)-1;
9567                         cache->cached = BTRFS_CACHE_FINISHED;
9568                         add_new_free_space(cache, root->fs_info,
9569                                            found_key.objectid,
9570                                            found_key.objectid +
9571                                            found_key.offset);
9572                         free_excluded_extents(root, cache);
9573                 }
9574
9575                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9576                 if (ret) {
9577                         btrfs_remove_free_space_cache(cache);
9578                         btrfs_put_block_group(cache);
9579                         goto error;
9580                 }
9581
9582                 ret = update_space_info(info, cache->flags, found_key.offset,
9583                                         btrfs_block_group_used(&cache->item),
9584                                         &space_info);
9585                 if (ret) {
9586                         btrfs_remove_free_space_cache(cache);
9587                         spin_lock(&info->block_group_cache_lock);
9588                         rb_erase(&cache->cache_node,
9589                                  &info->block_group_cache_tree);
9590                         RB_CLEAR_NODE(&cache->cache_node);
9591                         spin_unlock(&info->block_group_cache_lock);
9592                         btrfs_put_block_group(cache);
9593                         goto error;
9594                 }
9595
9596                 cache->space_info = space_info;
9597                 spin_lock(&cache->space_info->lock);
9598                 cache->space_info->bytes_readonly += cache->bytes_super;
9599                 spin_unlock(&cache->space_info->lock);
9600
9601                 __link_block_group(space_info, cache);
9602
9603                 set_avail_alloc_bits(root->fs_info, cache->flags);
9604                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9605                         inc_block_group_ro(cache, 1);
9606                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9607                         spin_lock(&info->unused_bgs_lock);
9608                         /* Should always be true but just in case. */
9609                         if (list_empty(&cache->bg_list)) {
9610                                 btrfs_get_block_group(cache);
9611                                 list_add_tail(&cache->bg_list,
9612                                               &info->unused_bgs);
9613                         }
9614                         spin_unlock(&info->unused_bgs_lock);
9615                 }
9616         }
9617
9618         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9619                 if (!(get_alloc_profile(root, space_info->flags) &
9620                       (BTRFS_BLOCK_GROUP_RAID10 |
9621                        BTRFS_BLOCK_GROUP_RAID1 |
9622                        BTRFS_BLOCK_GROUP_RAID5 |
9623                        BTRFS_BLOCK_GROUP_RAID6 |
9624                        BTRFS_BLOCK_GROUP_DUP)))
9625                         continue;
9626                 /*
9627                  * Avoid allocating from un-mirrored block groups if there are
9628                  * mirrored block groups.
9629                  */
9630                 list_for_each_entry(cache,
9631                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9632                                 list)
9633                         inc_block_group_ro(cache, 1);
9634                 list_for_each_entry(cache,
9635                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9636                                 list)
9637                         inc_block_group_ro(cache, 1);
9638         }
9639
9640         init_global_block_rsv(info);
9641         ret = 0;
9642 error:
9643         btrfs_free_path(path);
9644         return ret;
9645 }
9646
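/*
 * Insert the block group items for all block groups created during the
 * current transaction (trans->new_bgs) and finish their chunk allocation.
 * Flushing of pending block groups is disabled while we iterate so the
 * insertions below cannot recurse back into this list.
 */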
9647 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9648                                        struct btrfs_root *root)
9649 {
9650         struct btrfs_block_group_cache *block_group, *tmp;
9651         struct btrfs_root *extent_root = root->fs_info->extent_root;
9652         struct btrfs_block_group_item item;
9653         struct btrfs_key key;
9654         int ret = 0;
9655         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9656
9657         trans->can_flush_pending_bgs = false;
9658         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9659                 if (ret)
9660                         goto next;
9661
9662                 spin_lock(&block_group->lock);
9663                 memcpy(&item, &block_group->item, sizeof(item));
9664                 memcpy(&key, &block_group->key, sizeof(key));
9665                 spin_unlock(&block_group->lock);
9666
9667                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9668                                         sizeof(item));
9669                 if (ret)
9670                         btrfs_abort_transaction(trans, extent_root, ret);
9671                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9672                                                key.objectid, key.offset);
9673                 if (ret)
9674                         btrfs_abort_transaction(trans, extent_root, ret);
9675 next:
9676                 list_del_init(&block_group->bg_list);
9677         }
9678         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9679 }
9680
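/*
 * Create a new block group for the chunk at @chunk_offset spanning @size
 * bytes: set up the in-memory cache, link it into the rbtree and its
 * space_info, and queue it on trans->new_bgs.  The on-disk block group
 * item is inserted later by btrfs_create_pending_block_groups().
 */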
9681 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9682                            struct btrfs_root *root, u64 bytes_used,
9683                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9684                            u64 size)
9685 {
9686         int ret;
9687         struct btrfs_root *extent_root;
9688         struct btrfs_block_group_cache *cache;
9689
9690         extent_root = root->fs_info->extent_root;
9691
9692         btrfs_set_log_full_commit(root->fs_info, trans);
9693
9694         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9695         if (!cache)
9696                 return -ENOMEM;
9697
9698         btrfs_set_block_group_used(&cache->item, bytes_used);
9699         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9700         btrfs_set_block_group_flags(&cache->item, type);
9701
9702         cache->flags = type;
9703         cache->last_byte_to_unpin = (u64)-1;
9704         cache->cached = BTRFS_CACHE_FINISHED;
9705         ret = exclude_super_stripes(root, cache);
9706         if (ret) {
9707                 /*
9708                  * We may have excluded something, so call this just in
9709                  * case.
9710                  */
9711                 free_excluded_extents(root, cache);
9712                 btrfs_put_block_group(cache);
9713                 return ret;
9714         }
9715
9716         add_new_free_space(cache, root->fs_info, chunk_offset,
9717                            chunk_offset + size);
9718
9719         free_excluded_extents(root, cache);
9720
9721 #ifdef CONFIG_BTRFS_DEBUG
9722         if (btrfs_should_fragment_free_space(root, cache)) {
9723                 u64 new_bytes_used = size - bytes_used;
9724
9725                 bytes_used += new_bytes_used >> 1;
9726                 fragment_free_space(root, cache);
9727         }
9728 #endif
9729         /*
9730          * Call to ensure the corresponding space_info object is created and
9731          * assigned to our block group, but don't update its counters just yet.
9732          * We want our bg to be added to the rbtree with its ->space_info set.
9733          */
9734         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9735                                 &cache->space_info);
9736         if (ret) {
9737                 btrfs_remove_free_space_cache(cache);
9738                 btrfs_put_block_group(cache);
9739                 return ret;
9740         }
9741
9742         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9743         if (ret) {
9744                 btrfs_remove_free_space_cache(cache);
9745                 btrfs_put_block_group(cache);
9746                 return ret;
9747         }
9748
9749         /*
9750          * Now that our block group has its ->space_info set and is inserted in
9751          * the rbtree, update the space info's counters.
9752          */
9753         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9754                                 &cache->space_info);
9755         if (ret) {
9756                 btrfs_remove_free_space_cache(cache);
9757                 spin_lock(&root->fs_info->block_group_cache_lock);
9758                 rb_erase(&cache->cache_node,
9759                          &root->fs_info->block_group_cache_tree);
9760                 RB_CLEAR_NODE(&cache->cache_node);
9761                 spin_unlock(&root->fs_info->block_group_cache_lock);
9762                 btrfs_put_block_group(cache);
9763                 return ret;
9764         }
9765         update_global_block_rsv(root->fs_info);
9766
9767         spin_lock(&cache->space_info->lock);
9768         cache->space_info->bytes_readonly += cache->bytes_super;
9769         spin_unlock(&cache->space_info->lock);
9770
9771         __link_block_group(cache->space_info, cache);
9772
9773         list_add_tail(&cache->bg_list, &trans->new_bgs);
9774
9775         set_avail_alloc_bits(extent_root->fs_info, type);
9776
9777         return 0;
9778 }
9779
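/*
 * Clear the extended profile bits of @flags from the avail_*_alloc_bits
 * mask for whichever of data/metadata/system the flags describe.  Used
 * when the last block group of a profile goes away.
 */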
9780 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9781 {
9782         u64 extra_flags = chunk_to_extended(flags) &
9783                                 BTRFS_EXTENDED_PROFILE_MASK;
9784
9785         write_seqlock(&fs_info->profiles_lock);
9786         if (flags & BTRFS_BLOCK_GROUP_DATA)
9787                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9788         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9789                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9790         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9791                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9792         write_sequnlock(&fs_info->profiles_lock);
9793 }
9794
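/*
 * Remove the block group that starts at @group_start: return any cluster
 * it is part of, orphan and drop its free space cache inode, unlink it
 * from the rbtree and its space_info, delete its item from the extent
 * tree, and remove its extent map from the mapping tree unless a trim is
 * still running against the group.
 */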
9795 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9796                              struct btrfs_root *root, u64 group_start,
9797                              struct extent_map *em)
9798 {
9799         struct btrfs_path *path;
9800         struct btrfs_block_group_cache *block_group;
9801         struct btrfs_free_cluster *cluster;
9802         struct btrfs_root *tree_root = root->fs_info->tree_root;
9803         struct btrfs_key key;
9804         struct inode *inode;
9805         struct kobject *kobj = NULL;
9806         int ret;
9807         int index;
9808         int factor;
9809         struct btrfs_caching_control *caching_ctl = NULL;
9810         bool remove_em;
9811
9812         root = root->fs_info->extent_root;
9813
9814         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9815         BUG_ON(!block_group);
9816         BUG_ON(!block_group->ro);
9817
9818         /*
9819          * Free the reserved super bytes from this block group before
9820          * removing it.
9821          */
9822         free_excluded_extents(root, block_group);
9823
9824         memcpy(&key, &block_group->key, sizeof(key));
9825         index = get_block_group_index(block_group);
9826         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9827                                   BTRFS_BLOCK_GROUP_RAID1 |
9828                                   BTRFS_BLOCK_GROUP_RAID10))
9829                 factor = 2;
9830         else
9831                 factor = 1;
9832
9833         /* make sure this block group isn't part of an allocation cluster */
9834         cluster = &root->fs_info->data_alloc_cluster;
9835         spin_lock(&cluster->refill_lock);
9836         btrfs_return_cluster_to_free_space(block_group, cluster);
9837         spin_unlock(&cluster->refill_lock);
9838
9839         /*
9840          * make sure this block group isn't part of a metadata
9841          * allocation cluster
9842          */
9843         cluster = &root->fs_info->meta_alloc_cluster;
9844         spin_lock(&cluster->refill_lock);
9845         btrfs_return_cluster_to_free_space(block_group, cluster);
9846         spin_unlock(&cluster->refill_lock);
9847
9848         path = btrfs_alloc_path();
9849         if (!path) {
9850                 ret = -ENOMEM;
9851                 goto out;
9852         }
9853
9854         /*
9855          * get the inode first so any iput calls done for the io_list
9856          * aren't the final iput (no unlinks allowed now)
9857          */
9858         inode = lookup_free_space_inode(tree_root, block_group, path);
9859
9860         mutex_lock(&trans->transaction->cache_write_mutex);
9861         /*
9862          * Make sure our free space cache IO is done before removing the
9863          * free space inode.
9864          */
9865         spin_lock(&trans->transaction->dirty_bgs_lock);
9866         if (!list_empty(&block_group->io_list)) {
9867                 list_del_init(&block_group->io_list);
9868
9869                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9870
9871                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9872                 btrfs_wait_cache_io(root, trans, block_group,
9873                                     &block_group->io_ctl, path,
9874                                     block_group->key.objectid);
9875                 btrfs_put_block_group(block_group);
9876                 spin_lock(&trans->transaction->dirty_bgs_lock);
9877         }
9878
9879         if (!list_empty(&block_group->dirty_list)) {
9880                 list_del_init(&block_group->dirty_list);
9881                 btrfs_put_block_group(block_group);
9882         }
9883         spin_unlock(&trans->transaction->dirty_bgs_lock);
9884         mutex_unlock(&trans->transaction->cache_write_mutex);
9885
9886         if (!IS_ERR(inode)) {
9887                 ret = btrfs_orphan_add(trans, inode);
9888                 if (ret) {
9889                         btrfs_add_delayed_iput(inode);
9890                         goto out;
9891                 }
9892                 clear_nlink(inode);
9893                 /* One for the block groups ref */
9894                 spin_lock(&block_group->lock);
9895                 if (block_group->iref) {
9896                         block_group->iref = 0;
9897                         block_group->inode = NULL;
9898                         spin_unlock(&block_group->lock);
9899                         iput(inode);
9900                 } else {
9901                         spin_unlock(&block_group->lock);
9902                 }
9903                 /* One for our lookup ref */
9904                 btrfs_add_delayed_iput(inode);
9905         }
9906
9907         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9908         key.offset = block_group->key.objectid;
9909         key.type = 0;
9910
9911         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9912         if (ret < 0)
9913                 goto out;
9914         if (ret > 0)
9915                 btrfs_release_path(path);
9916         if (ret == 0) {
9917                 ret = btrfs_del_item(trans, tree_root, path);
9918                 if (ret)
9919                         goto out;
9920                 btrfs_release_path(path);
9921         }
9922
9923         spin_lock(&root->fs_info->block_group_cache_lock);
9924         rb_erase(&block_group->cache_node,
9925                  &root->fs_info->block_group_cache_tree);
9926         RB_CLEAR_NODE(&block_group->cache_node);
9927
9928         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9929                 root->fs_info->first_logical_byte = (u64)-1;
9930         spin_unlock(&root->fs_info->block_group_cache_lock);
9931
9932         down_write(&block_group->space_info->groups_sem);
9933         /*
9934          * we must use list_del_init so people can check to see if they
9935          * are still on the list after taking the semaphore
9936          */
9937         list_del_init(&block_group->list);
9938         if (list_empty(&block_group->space_info->block_groups[index])) {
9939                 kobj = block_group->space_info->block_group_kobjs[index];
9940                 block_group->space_info->block_group_kobjs[index] = NULL;
9941                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9942         }
9943         up_write(&block_group->space_info->groups_sem);
9944         if (kobj) {
9945                 kobject_del(kobj);
9946                 kobject_put(kobj);
9947         }
9948
9949         if (block_group->has_caching_ctl)
9950                 caching_ctl = get_caching_control(block_group);
9951         if (block_group->cached == BTRFS_CACHE_STARTED)
9952                 wait_block_group_cache_done(block_group);
9953         if (block_group->has_caching_ctl) {
9954                 down_write(&root->fs_info->commit_root_sem);
9955                 if (!caching_ctl) {
9956                         struct btrfs_caching_control *ctl;
9957
9958                         list_for_each_entry(ctl,
9959                                     &root->fs_info->caching_block_groups, list)
9960                                 if (ctl->block_group == block_group) {
9961                                         caching_ctl = ctl;
9962                                         atomic_inc(&caching_ctl->count);
9963                                         break;
9964                                 }
9965                 }
9966                 if (caching_ctl)
9967                         list_del_init(&caching_ctl->list);
9968                 up_write(&root->fs_info->commit_root_sem);
9969                 if (caching_ctl) {
9970                         /* Once for the caching bgs list and once for us. */
9971                         put_caching_control(caching_ctl);
9972                         put_caching_control(caching_ctl);
9973                 }
9974         }
9975
9976         spin_lock(&trans->transaction->dirty_bgs_lock);
9977         WARN_ON(!list_empty(&block_group->dirty_list));
9978         WARN_ON(!list_empty(&block_group->io_list));
9983         spin_unlock(&trans->transaction->dirty_bgs_lock);
9984         btrfs_remove_free_space_cache(block_group);
9985
9986         spin_lock(&block_group->space_info->lock);
9987         list_del_init(&block_group->ro_list);
9988
9989         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9990                 WARN_ON(block_group->space_info->total_bytes
9991                         < block_group->key.offset);
9992                 WARN_ON(block_group->space_info->bytes_readonly
9993                         < block_group->key.offset);
9994                 WARN_ON(block_group->space_info->disk_total
9995                         < block_group->key.offset * factor);
9996         }
9997         block_group->space_info->total_bytes -= block_group->key.offset;
9998         block_group->space_info->bytes_readonly -= block_group->key.offset;
9999         block_group->space_info->disk_total -= block_group->key.offset * factor;
10000
10001         spin_unlock(&block_group->space_info->lock);
10002
10003         memcpy(&key, &block_group->key, sizeof(key));
10004
10005         lock_chunks(root);
10006         if (!list_empty(&em->list)) {
10007                 /* We're in the transaction->pending_chunks list. */
10008                 free_extent_map(em);
10009         }
10010         spin_lock(&block_group->lock);
10011         block_group->removed = 1;
10012         /*
10013          * removed the block group from the fs_info->block_group_cache_tree
10014          * rbtree, so no one can find it anymore.  Even if someone already got
10015          * this block group before we removed it from the rbtree, they have already
10016          * block group before we removed it from the rbtree, they have already
10017          * incremented block_group->trimming - if they didn't, they won't find
10018          * any free space entries because we already removed them all when we
10019          * called btrfs_remove_free_space_cache().
10020          *
10021          * And we must not remove the extent map from the fs_info->mapping_tree
10022          * to prevent the same logical address range and physical device space
10023          * ranges from being reused for a new block group. This is because our
10024          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10025          * completely transactionless, so while it is trimming a range the
10026          * currently running transaction might finish and a new one start,
10027          * allowing for new block groups to be created that can reuse the same
10028          * physical device locations unless we take this special care.
10029          *
10030          * There may also be an implicit trim operation if the file system
10031          * is mounted with -odiscard. The same protections must remain
10032          * in place until the extents have been discarded completely when
10033          * the transaction commit has completed.
10034          */
10035         remove_em = (atomic_read(&block_group->trimming) == 0);
10036         /*
10037          * Make sure a trimmer task always sees the em in the pinned_chunks list
10038          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10039          * before checking block_group->removed).
10040          */
10041         if (!remove_em) {
10042                 /*
10043                  * Our em might be in trans->transaction->pending_chunks which
10044                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10045                  * and so is the fs_info->pinned_chunks list.
10046                  *
10047                  * So at this point we must be holding the chunk_mutex to avoid
10048                  * any races with chunk allocation (more specifically at
10049                  * volumes.c:contains_pending_extent()), to ensure it always
10050                  * sees the em, either in the pending_chunks list or in the
10051                  * pinned_chunks list.
10052                  */
10053                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10054         }
10055         spin_unlock(&block_group->lock);
10056
10057         if (remove_em) {
10058                 struct extent_map_tree *em_tree;
10059
10060                 em_tree = &root->fs_info->mapping_tree.map_tree;
10061                 write_lock(&em_tree->lock);
10062                 /*
10063                  * The em might be in the pending_chunks list, so make sure the
10064                  * chunk mutex is locked, since remove_extent_mapping() will
10065                  * delete us from that list.
10066                  */
10067                 remove_extent_mapping(em_tree, em);
10068                 write_unlock(&em_tree->lock);
10069                 /* once for the tree */
10070                 free_extent_map(em);
10071         }
10072
10073         unlock_chunks(root);
10074
10075         btrfs_put_block_group(block_group);
10076         btrfs_put_block_group(block_group);
10077
10078         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10079         if (ret > 0)
10080                 ret = -EIO;
10081         if (ret < 0)
10082                 goto out;
10083
10084         ret = btrfs_del_item(trans, root, path);
10085 out:
10086         btrfs_free_path(path);
10087         return ret;
10088 }
10089
10090 /*
10091  * Process the unused_bgs list and remove any that don't have any allocated
10092  * space inside of them.
10093  */
10094 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10095 {
10096         struct btrfs_block_group_cache *block_group;
10097         struct btrfs_space_info *space_info;
10098         struct btrfs_root *root = fs_info->extent_root;
10099         struct btrfs_trans_handle *trans;
10100         int ret = 0;
10101
10102         if (!fs_info->open)
10103                 return;
10104
10105         spin_lock(&fs_info->unused_bgs_lock);
10106         while (!list_empty(&fs_info->unused_bgs)) {
10107                 u64 start, end;
10108                 int trimming;
10109
10110                 block_group = list_first_entry(&fs_info->unused_bgs,
10111                                                struct btrfs_block_group_cache,
10112                                                bg_list);
10113                 space_info = block_group->space_info;
10114                 list_del_init(&block_group->bg_list);
10115                 if (ret || btrfs_mixed_space_info(space_info)) {
10116                         btrfs_put_block_group(block_group);
10117                         continue;
10118                 }
10119                 spin_unlock(&fs_info->unused_bgs_lock);
10120
10121                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10122
10123                 /* Don't want to race with allocators so take the groups_sem */
10124                 down_write(&space_info->groups_sem);
10125                 spin_lock(&block_group->lock);
10126                 if (block_group->reserved ||
10127                     btrfs_block_group_used(&block_group->item) ||
10128                     block_group->ro) {
10129                         /*
10130                          * We want to bail if we made new allocations or have
10131                          * outstanding allocations in this block group.  We do
10132                          * the ro check in case balance is currently acting on
10133                          * this block group.
10134                          */
10135                         spin_unlock(&block_group->lock);
10136                         up_write(&space_info->groups_sem);
10137                         goto next;
10138                 }
10139                 spin_unlock(&block_group->lock);
10140
10141                 /* We don't want to force the issue, only flip to RO if it's ok. */
10142                 ret = inc_block_group_ro(block_group, 0);
10143                 up_write(&space_info->groups_sem);
10144                 if (ret < 0) {
10145                         ret = 0;
10146                         goto next;
10147                 }
10148
10149                 /*
10150                  * Want to do this before we do anything else so we can recover
10151                  * properly if we fail to join the transaction.
10152                  */
10153                 /* 1 for btrfs_orphan_reserve_metadata() */
10154                 trans = btrfs_start_transaction(root, 1);
10155                 if (IS_ERR(trans)) {
10156                         btrfs_dec_block_group_ro(root, block_group);
10157                         ret = PTR_ERR(trans);
10158                         goto next;
10159                 }
10160
10161                 /*
10162                  * We could have pending pinned extents for this block group;
10163                  * just delete them, we don't care about them anymore.
10164                  */
10165                 start = block_group->key.objectid;
10166                 end = start + block_group->key.offset - 1;
10167                 /*
10168                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10169                  * btrfs_finish_extent_commit(). If we are at transaction N,
10170                  * another task might be running finish_extent_commit() for the
10171                  * previous transaction N - 1, and have seen a range belonging
10172                  * to the block group in freed_extents[] before we were able to
10173                  * clear the whole block group range from freed_extents[]. This
10174                  * means that task can lookup for the block group after we
10175                  * means that task can look up the block group after we
10176                  * a BUG_ON() at btrfs_unpin_extent_range().
10177                  */
10178                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10179                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10180                                   EXTENT_DIRTY, GFP_NOFS);
10181                 if (ret) {
10182                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10183                         btrfs_dec_block_group_ro(root, block_group);
10184                         goto end_trans;
10185                 }
10186                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10187                                   EXTENT_DIRTY, GFP_NOFS);
10188                 if (ret) {
10189                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10190                         btrfs_dec_block_group_ro(root, block_group);
10191                         goto end_trans;
10192                 }
10193                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10194
10195                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10196                 spin_lock(&space_info->lock);
10197                 spin_lock(&block_group->lock);
10198
10199                 space_info->bytes_pinned -= block_group->pinned;
10200                 space_info->bytes_readonly += block_group->pinned;
10201                 percpu_counter_add(&space_info->total_bytes_pinned,
10202                                    -block_group->pinned);
10203                 block_group->pinned = 0;
10204
10205                 spin_unlock(&block_group->lock);
10206                 spin_unlock(&space_info->lock);
10207
10208                 /* DISCARD can flip during remount */
10209                 trimming = btrfs_test_opt(root, DISCARD);
10210
10211                 /* Implicit trim during transaction commit. */
10212                 if (trimming)
10213                         btrfs_get_block_group_trimming(block_group);
10214
10215                 /*
10216                  * btrfs_remove_chunk() will abort the transaction if things go
10217                  * horribly wrong.
10218                  */
10219                 ret = btrfs_remove_chunk(trans, root,
10220                                          block_group->key.objectid);
10221
10222                 if (ret) {
10223                         if (trimming)
10224                                 btrfs_put_block_group_trimming(block_group);
10225                         goto end_trans;
10226                 }
10227
10228                 /*
10229                  * If we're not mounted with -odiscard, we can just forget
10230                  * about this block group. Otherwise we'll need to wait
10231                  * until transaction commit to do the actual discard.
10232                  */
10233                 if (trimming) {
10234                         WARN_ON(!list_empty(&block_group->bg_list));
10235                         spin_lock(&trans->transaction->deleted_bgs_lock);
10236                         list_move(&block_group->bg_list,
10237                                   &trans->transaction->deleted_bgs);
10238                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10239                         btrfs_get_block_group(block_group);
10240                 }
10241 end_trans:
10242                 btrfs_end_transaction(trans, root);
10243 next:
10244                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10245                 btrfs_put_block_group(block_group);
10246                 spin_lock(&fs_info->unused_bgs_lock);
10247         }
10248         spin_unlock(&fs_info->unused_bgs_lock);
10249 }
10250
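/*
 * Pre-create the basic space_info structs: one for SYSTEM chunks, plus
 * either a single mixed METADATA|DATA entry or separate METADATA and DATA
 * entries, depending on the MIXED_GROUPS incompat feature.
 */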
10251 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10252 {
10253         struct btrfs_space_info *space_info;
10254         struct btrfs_super_block *disk_super;
10255         u64 features;
10256         u64 flags;
10257         int mixed = 0;
10258         int ret;
10259
10260         disk_super = fs_info->super_copy;
10261         if (!btrfs_super_root(disk_super))
10262                 return 1;
10263
10264         features = btrfs_super_incompat_flags(disk_super);
10265         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10266                 mixed = 1;
10267
10268         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10269         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10270         if (ret)
10271                 goto out;
10272
10273         if (mixed) {
10274                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10275                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10276         } else {
10277                 flags = BTRFS_BLOCK_GROUP_METADATA;
10278                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10279                 if (ret)
10280                         goto out;
10281
10282                 flags = BTRFS_BLOCK_GROUP_DATA;
10283                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10284         }
10285 out:
10286         return ret;
10287 }
10288
10289 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10290 {
10291         return unpin_extent_range(root, start, end, false);
10292 }
10293
10294 /*
10295  * It used to be that old block groups would be left around forever.
10296  * Iterating over them would be enough to trim unused space.  Since we
10297  * now automatically remove them, we also need to iterate over unallocated
10298  * space.
10299  *
10300  * We don't want a transaction for this since the discard may take a
10301  * substantial amount of time.  We don't require that a transaction be
10302  * running, but we do need to take a running transaction into account
10303  * to ensure that we're not discarding chunks that were released in
10304  * the current transaction.
10305  *
10306  * Holding the chunks lock will prevent other threads from allocating
10307  * or releasing chunks, but it won't prevent a running transaction
10308  * from committing and releasing the memory that the pending chunks
10309  * list head uses.  For that, we need to take a reference to the
10310  * transaction.
10311  */
10312 static int btrfs_trim_free_extents(struct btrfs_device *device,
10313                                    u64 minlen, u64 *trimmed)
10314 {
10315         u64 start = 0, len = 0;
10316         int ret;
10317
10318         *trimmed = 0;
10319
10320         /* Not writeable = nothing to do. */
10321         if (!device->writeable)
10322                 return 0;
10323
10324         /* No free space = nothing to do. */
10325         if (device->total_bytes <= device->bytes_used)
10326                 return 0;
10327
10328         ret = 0;
10329
10330         while (1) {
10331                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10332                 struct btrfs_transaction *trans;
10333                 u64 bytes;
10334
10335                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10336                 if (ret)
10337                         return ret;
10338
10339                 down_read(&fs_info->commit_root_sem);
10340
10341                 spin_lock(&fs_info->trans_lock);
10342                 trans = fs_info->running_transaction;
10343                 if (trans)
10344                         atomic_inc(&trans->use_count);
10345                 spin_unlock(&fs_info->trans_lock);
10346
10347                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10348                                                  &start, &len);
10349                 if (trans)
10350                         btrfs_put_transaction(trans);
10351
10352                 if (ret) {
10353                         up_read(&fs_info->commit_root_sem);
10354                         mutex_unlock(&fs_info->chunk_mutex);
10355                         if (ret == -ENOSPC)
10356                                 ret = 0;
10357                         break;
10358                 }
10359
10360                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10361                 up_read(&fs_info->commit_root_sem);
10362                 mutex_unlock(&fs_info->chunk_mutex);
10363
10364                 if (ret)
10365                         break;
10366
10367                 start += len;
10368                 *trimmed += bytes;
10369
10370                 if (fatal_signal_pending(current)) {
10371                         ret = -ERESTARTSYS;
10372                         break;
10373                 }
10374
10375                 cond_resched();
10376         }
10377
10378         return ret;
10379 }
10380
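/*
 * FITRIM entry point: trim the free space of every block group overlapping
 * the requested range, then trim the unallocated space on each writeable
 * device.  On return, range->len holds the total number of bytes trimmed.
 */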
10381 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10382 {
10383         struct btrfs_fs_info *fs_info = root->fs_info;
10384         struct btrfs_block_group_cache *cache = NULL;
10385         struct btrfs_device *device;
10386         struct list_head *devices;
10387         u64 group_trimmed;
10388         u64 start;
10389         u64 end;
10390         u64 trimmed = 0;
10391         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10392         int ret = 0;
10393
10394         /*
10395          * Try to trim all FS space; our block groups may start at a non-zero offset.
10396          */
10397         if (range->len == total_bytes)
10398                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10399         else
10400                 cache = btrfs_lookup_block_group(fs_info, range->start);
10401
10402         while (cache) {
10403                 if (cache->key.objectid >= (range->start + range->len)) {
10404                         btrfs_put_block_group(cache);
10405                         break;
10406                 }
10407
10408                 start = max(range->start, cache->key.objectid);
10409                 end = min(range->start + range->len,
10410                                 cache->key.objectid + cache->key.offset);
10411
10412                 if (end - start >= range->minlen) {
10413                         if (!block_group_cache_done(cache)) {
10414                                 ret = cache_block_group(cache, 0);
10415                                 if (ret) {
10416                                         btrfs_put_block_group(cache);
10417                                         break;
10418                                 }
10419                                 ret = wait_block_group_cache_done(cache);
10420                                 if (ret) {
10421                                         btrfs_put_block_group(cache);
10422                                         break;
10423                                 }
10424                         }
10425                         ret = btrfs_trim_block_group(cache,
10426                                                      &group_trimmed,
10427                                                      start,
10428                                                      end,
10429                                                      range->minlen);
10430
10431                         trimmed += group_trimmed;
10432                         if (ret) {
10433                                 btrfs_put_block_group(cache);
10434                                 break;
10435                         }
10436                 }
10437
10438                 cache = next_block_group(fs_info->tree_root, cache);
10439         }
10440
10441         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10442         devices = &root->fs_info->fs_devices->alloc_list;
10443         list_for_each_entry(device, devices, dev_alloc_list) {
10444                 ret = btrfs_trim_free_extents(device, range->minlen,
10445                                               &group_trimmed);
10446                 if (ret)
10447                         break;
10448
10449                 trimmed += group_trimmed;
10450         }
10451         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10452
10453         range->len = trimmed;
10454         return ret;
10455 }
10456
10457 /*
10458  * btrfs_{start,end}_write_no_snapshoting() are similar to
10459  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10460  * data into the page cache through nocow before the subvolume is snapshotted
10461  * (flushing that data to disk only after the snapshot is created), and to
10462  * prevent operations that would make the snapshot inconsistent while
10463  * snapshotting is ongoing (writes followed by expanding truncates, for example).
10464  */
10465 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10466 {
10467         percpu_counter_dec(&root->subv_writers->counter);
10468         /*
10469          * Make sure counter is updated before we wake up waiters.
10470          */
10471         smp_mb();
10472         if (waitqueue_active(&root->subv_writers->wait))
10473                 wake_up(&root->subv_writers->wait);
10474 }
10475
10476 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10477 {
10478         if (atomic_read(&root->will_be_snapshoted))
10479                 return 0;
10480
10481         percpu_counter_inc(&root->subv_writers->counter);
10482         /*
10483          * Make sure counter is updated before we check for snapshot creation.
10484          */
10485         smp_mb();
10486         if (atomic_read(&root->will_be_snapshoted)) {
10487                 btrfs_end_write_no_snapshoting(root);
10488                 return 0;
10489         }
10490         return 1;
10491 }
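
/*
 * A minimal usage sketch for the two helpers above; the caller shape and
 * the do_nocow_write()/do_cow_write() helpers are hypothetical, not part
 * of this file.  A nocow writer brackets its page cache writes so snapshot
 * creation can exclude or wait for it:
 *
 *	if (btrfs_start_write_no_snapshoting(root)) {
 *		ret = do_nocow_write(inode);
 *		btrfs_end_write_no_snapshoting(root);
 *	} else {
 *		ret = do_cow_write(inode);
 *	}
 */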