spi: rockchip: Signal unfinished DMA transfers
[cascardo/linux.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "free-space-tree.h"
37 #include "math.h"
38 #include "sysfs.h"
39 #include "qgroup.h"
40
41 #undef SCRAMBLE_DELAYED_REFS
42
43 /*
44  * control flags for do_chunk_alloc's force field
45  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
46  * if we really need one.
47  *
48  * CHUNK_ALLOC_LIMITED means to only try and allocate one
49  * if we have very few chunks already allocated.  This is
50  * used as part of the clustering code to help make sure
51  * we have a good pool of storage to cluster in, without
52  * filling the FS with empty chunks
53  *
54  * CHUNK_ALLOC_FORCE means it must try to allocate one
55  *
56  */
57 enum {
58         CHUNK_ALLOC_NO_FORCE = 0,
59         CHUNK_ALLOC_LIMITED = 1,
60         CHUNK_ALLOC_FORCE = 2,
61 };
62
63 /*
64  * Control how reservations are dealt with.
65  *
66  * RESERVE_FREE - freeing a reservation.
67  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
68  *   ENOSPC accounting
69  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
70  *   bytes_may_use as the ENOSPC accounting is done elsewhere
71  */
72 enum {
73         RESERVE_FREE = 0,
74         RESERVE_ALLOC = 1,
75         RESERVE_ALLOC_NO_ACCOUNT = 2,
76 };
77
78 static int update_block_group(struct btrfs_trans_handle *trans,
79                               struct btrfs_root *root, u64 bytenr,
80                               u64 num_bytes, int alloc);
81 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
82                                 struct btrfs_root *root,
83                                 struct btrfs_delayed_ref_node *node, u64 parent,
84                                 u64 root_objectid, u64 owner_objectid,
85                                 u64 owner_offset, int refs_to_drop,
86                                 struct btrfs_delayed_extent_op *extra_op);
87 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
88                                     struct extent_buffer *leaf,
89                                     struct btrfs_extent_item *ei);
90 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
91                                       struct btrfs_root *root,
92                                       u64 parent, u64 root_objectid,
93                                       u64 flags, u64 owner, u64 offset,
94                                       struct btrfs_key *ins, int ref_mod);
95 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
96                                      struct btrfs_root *root,
97                                      u64 parent, u64 root_objectid,
98                                      u64 flags, struct btrfs_disk_key *key,
99                                      int level, struct btrfs_key *ins);
100 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
101                           struct btrfs_root *extent_root, u64 flags,
102                           int force);
103 static int find_next_key(struct btrfs_path *path, int level,
104                          struct btrfs_key *key);
105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106                             int dump_block_groups);
107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108                                        u64 num_bytes, int reserve,
109                                        int delalloc);
110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111                                u64 num_bytes);
112 int btrfs_pin_extent(struct btrfs_root *root,
113                      u64 bytenr, u64 num_bytes, int reserved);
114
115 static noinline int
116 block_group_cache_done(struct btrfs_block_group_cache *cache)
117 {
118         smp_mb();
119         return cache->cached == BTRFS_CACHE_FINISHED ||
120                 cache->cached == BTRFS_CACHE_ERROR;
121 }
122
123 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 {
125         return (cache->flags & bits) == bits;
126 }
127
128 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 {
130         atomic_inc(&cache->count);
131 }
132
133 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 {
135         if (atomic_dec_and_test(&cache->count)) {
136                 WARN_ON(cache->pinned > 0);
137                 WARN_ON(cache->reserved > 0);
138                 kfree(cache->free_space_ctl);
139                 kfree(cache);
140         }
141 }
142
143 /*
144  * this adds the block group to the fs_info rb tree for the block group
145  * cache
146  */
147 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
148                                 struct btrfs_block_group_cache *block_group)
149 {
150         struct rb_node **p;
151         struct rb_node *parent = NULL;
152         struct btrfs_block_group_cache *cache;
153
154         spin_lock(&info->block_group_cache_lock);
155         p = &info->block_group_cache_tree.rb_node;
156
157         while (*p) {
158                 parent = *p;
159                 cache = rb_entry(parent, struct btrfs_block_group_cache,
160                                  cache_node);
161                 if (block_group->key.objectid < cache->key.objectid) {
162                         p = &(*p)->rb_left;
163                 } else if (block_group->key.objectid > cache->key.objectid) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         spin_unlock(&info->block_group_cache_lock);
167                         return -EEXIST;
168                 }
169         }
170
171         rb_link_node(&block_group->cache_node, parent, p);
172         rb_insert_color(&block_group->cache_node,
173                         &info->block_group_cache_tree);
174
175         if (info->first_logical_byte > block_group->key.objectid)
176                 info->first_logical_byte = block_group->key.objectid;
177
178         spin_unlock(&info->block_group_cache_lock);
179
180         return 0;
181 }
182
183 /*
184  * This will return the block group at or after bytenr if contains is 0, else
185  * it will return the block group that contains the bytenr
186  */
187 static struct btrfs_block_group_cache *
188 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
189                               int contains)
190 {
191         struct btrfs_block_group_cache *cache, *ret = NULL;
192         struct rb_node *n;
193         u64 end, start;
194
195         spin_lock(&info->block_group_cache_lock);
196         n = info->block_group_cache_tree.rb_node;
197
198         while (n) {
199                 cache = rb_entry(n, struct btrfs_block_group_cache,
200                                  cache_node);
201                 end = cache->key.objectid + cache->key.offset - 1;
202                 start = cache->key.objectid;
203
204                 if (bytenr < start) {
205                         if (!contains && (!ret || start < ret->key.objectid))
206                                 ret = cache;
207                         n = n->rb_left;
208                 } else if (bytenr > start) {
209                         if (contains && bytenr <= end) {
210                                 ret = cache;
211                                 break;
212                         }
213                         n = n->rb_right;
214                 } else {
215                         ret = cache;
216                         break;
217                 }
218         }
219         if (ret) {
220                 btrfs_get_block_group(ret);
221                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
222                         info->first_logical_byte = ret->key.objectid;
223         }
224         spin_unlock(&info->block_group_cache_lock);
225
226         return ret;
227 }
228
229 static int add_excluded_extent(struct btrfs_root *root,
230                                u64 start, u64 num_bytes)
231 {
232         u64 end = start + num_bytes - 1;
233         set_extent_bits(&root->fs_info->freed_extents[0],
234                         start, end, EXTENT_UPTODATE);
235         set_extent_bits(&root->fs_info->freed_extents[1],
236                         start, end, EXTENT_UPTODATE);
237         return 0;
238 }
239
240 static void free_excluded_extents(struct btrfs_root *root,
241                                   struct btrfs_block_group_cache *cache)
242 {
243         u64 start, end;
244
245         start = cache->key.objectid;
246         end = start + cache->key.offset - 1;
247
248         clear_extent_bits(&root->fs_info->freed_extents[0],
249                           start, end, EXTENT_UPTODATE);
250         clear_extent_bits(&root->fs_info->freed_extents[1],
251                           start, end, EXTENT_UPTODATE);
252 }
253
254 static int exclude_super_stripes(struct btrfs_root *root,
255                                  struct btrfs_block_group_cache *cache)
256 {
257         u64 bytenr;
258         u64 *logical;
259         int stripe_len;
260         int i, nr, ret;
261
262         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
263                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
264                 cache->bytes_super += stripe_len;
265                 ret = add_excluded_extent(root, cache->key.objectid,
266                                           stripe_len);
267                 if (ret)
268                         return ret;
269         }
270
271         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
272                 bytenr = btrfs_sb_offset(i);
273                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
274                                        cache->key.objectid, bytenr,
275                                        0, &logical, &nr, &stripe_len);
276                 if (ret)
277                         return ret;
278
279                 while (nr--) {
280                         u64 start, len;
281
282                         if (logical[nr] > cache->key.objectid +
283                             cache->key.offset)
284                                 continue;
285
286                         if (logical[nr] + stripe_len <= cache->key.objectid)
287                                 continue;
288
289                         start = logical[nr];
290                         if (start < cache->key.objectid) {
291                                 start = cache->key.objectid;
292                                 len = (logical[nr] + stripe_len) - start;
293                         } else {
294                                 len = min_t(u64, stripe_len,
295                                             cache->key.objectid +
296                                             cache->key.offset - start);
297                         }
298
299                         cache->bytes_super += len;
300                         ret = add_excluded_extent(root, start, len);
301                         if (ret) {
302                                 kfree(logical);
303                                 return ret;
304                         }
305                 }
306
307                 kfree(logical);
308         }
309         return 0;
310 }
311
312 static struct btrfs_caching_control *
313 get_caching_control(struct btrfs_block_group_cache *cache)
314 {
315         struct btrfs_caching_control *ctl;
316
317         spin_lock(&cache->lock);
318         if (!cache->caching_ctl) {
319                 spin_unlock(&cache->lock);
320                 return NULL;
321         }
322
323         ctl = cache->caching_ctl;
324         atomic_inc(&ctl->count);
325         spin_unlock(&cache->lock);
326         return ctl;
327 }
328
329 static void put_caching_control(struct btrfs_caching_control *ctl)
330 {
331         if (atomic_dec_and_test(&ctl->count))
332                 kfree(ctl);
333 }
334
335 #ifdef CONFIG_BTRFS_DEBUG
336 static void fragment_free_space(struct btrfs_root *root,
337                                 struct btrfs_block_group_cache *block_group)
338 {
339         u64 start = block_group->key.objectid;
340         u64 len = block_group->key.offset;
341         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
342                 root->nodesize : root->sectorsize;
343         u64 step = chunk << 1;
344
345         while (len > chunk) {
346                 btrfs_remove_free_space(block_group, start, chunk);
347                 start += step;
348                 if (len < step)
349                         len = 0;
350                 else
351                         len -= step;
352         }
353 }
354 #endif
355
356 /*
357  * this is only called by cache_block_group, since we could have freed extents
358  * we need to check the pinned_extents for any extents that can't be used yet
359  * since their free space will be released as soon as the transaction commits.
360  */
361 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
362                        struct btrfs_fs_info *info, u64 start, u64 end)
363 {
364         u64 extent_start, extent_end, size, total_added = 0;
365         int ret;
366
367         while (start < end) {
368                 ret = find_first_extent_bit(info->pinned_extents, start,
369                                             &extent_start, &extent_end,
370                                             EXTENT_DIRTY | EXTENT_UPTODATE,
371                                             NULL);
372                 if (ret)
373                         break;
374
375                 if (extent_start <= start) {
376                         start = extent_end + 1;
377                 } else if (extent_start > start && extent_start < end) {
378                         size = extent_start - start;
379                         total_added += size;
380                         ret = btrfs_add_free_space(block_group, start,
381                                                    size);
382                         BUG_ON(ret); /* -ENOMEM or logic error */
383                         start = extent_end + 1;
384                 } else {
385                         break;
386                 }
387         }
388
389         if (start < end) {
390                 size = end - start;
391                 total_added += size;
392                 ret = btrfs_add_free_space(block_group, start, size);
393                 BUG_ON(ret); /* -ENOMEM or logic error */
394         }
395
396         return total_added;
397 }
398
399 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
400 {
401         struct btrfs_block_group_cache *block_group;
402         struct btrfs_fs_info *fs_info;
403         struct btrfs_root *extent_root;
404         struct btrfs_path *path;
405         struct extent_buffer *leaf;
406         struct btrfs_key key;
407         u64 total_found = 0;
408         u64 last = 0;
409         u32 nritems;
410         int ret;
411         bool wakeup = true;
412
413         block_group = caching_ctl->block_group;
414         fs_info = block_group->fs_info;
415         extent_root = fs_info->extent_root;
416
417         path = btrfs_alloc_path();
418         if (!path)
419                 return -ENOMEM;
420
421         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
422
423 #ifdef CONFIG_BTRFS_DEBUG
424         /*
425          * If we're fragmenting we don't want to make anybody think we can
426          * allocate from this block group until we've had a chance to fragment
427          * the free space.
428          */
429         if (btrfs_should_fragment_free_space(extent_root, block_group))
430                 wakeup = false;
431 #endif
432         /*
433          * We don't want to deadlock with somebody trying to allocate a new
434          * extent for the extent root while also trying to search the extent
435          * root to add free space.  So we skip locking and search the commit
436          * root, since its read-only
437          */
438         path->skip_locking = 1;
439         path->search_commit_root = 1;
440         path->reada = READA_FORWARD;
441
442         key.objectid = last;
443         key.offset = 0;
444         key.type = BTRFS_EXTENT_ITEM_KEY;
445
446 next:
447         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
448         if (ret < 0)
449                 goto out;
450
451         leaf = path->nodes[0];
452         nritems = btrfs_header_nritems(leaf);
453
454         while (1) {
455                 if (btrfs_fs_closing(fs_info) > 1) {
456                         last = (u64)-1;
457                         break;
458                 }
459
460                 if (path->slots[0] < nritems) {
461                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
462                 } else {
463                         ret = find_next_key(path, 0, &key);
464                         if (ret)
465                                 break;
466
467                         if (need_resched() ||
468                             rwsem_is_contended(&fs_info->commit_root_sem)) {
469                                 if (wakeup)
470                                         caching_ctl->progress = last;
471                                 btrfs_release_path(path);
472                                 up_read(&fs_info->commit_root_sem);
473                                 mutex_unlock(&caching_ctl->mutex);
474                                 cond_resched();
475                                 mutex_lock(&caching_ctl->mutex);
476                                 down_read(&fs_info->commit_root_sem);
477                                 goto next;
478                         }
479
480                         ret = btrfs_next_leaf(extent_root, path);
481                         if (ret < 0)
482                                 goto out;
483                         if (ret)
484                                 break;
485                         leaf = path->nodes[0];
486                         nritems = btrfs_header_nritems(leaf);
487                         continue;
488                 }
489
490                 if (key.objectid < last) {
491                         key.objectid = last;
492                         key.offset = 0;
493                         key.type = BTRFS_EXTENT_ITEM_KEY;
494
495                         if (wakeup)
496                                 caching_ctl->progress = last;
497                         btrfs_release_path(path);
498                         goto next;
499                 }
500
501                 if (key.objectid < block_group->key.objectid) {
502                         path->slots[0]++;
503                         continue;
504                 }
505
506                 if (key.objectid >= block_group->key.objectid +
507                     block_group->key.offset)
508                         break;
509
510                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
511                     key.type == BTRFS_METADATA_ITEM_KEY) {
512                         total_found += add_new_free_space(block_group,
513                                                           fs_info, last,
514                                                           key.objectid);
515                         if (key.type == BTRFS_METADATA_ITEM_KEY)
516                                 last = key.objectid +
517                                         fs_info->tree_root->nodesize;
518                         else
519                                 last = key.objectid + key.offset;
520
521                         if (total_found > CACHING_CTL_WAKE_UP) {
522                                 total_found = 0;
523                                 if (wakeup)
524                                         wake_up(&caching_ctl->wait);
525                         }
526                 }
527                 path->slots[0]++;
528         }
529         ret = 0;
530
531         total_found += add_new_free_space(block_group, fs_info, last,
532                                           block_group->key.objectid +
533                                           block_group->key.offset);
534         caching_ctl->progress = (u64)-1;
535
536 out:
537         btrfs_free_path(path);
538         return ret;
539 }
540
541 static noinline void caching_thread(struct btrfs_work *work)
542 {
543         struct btrfs_block_group_cache *block_group;
544         struct btrfs_fs_info *fs_info;
545         struct btrfs_caching_control *caching_ctl;
546         struct btrfs_root *extent_root;
547         int ret;
548
549         caching_ctl = container_of(work, struct btrfs_caching_control, work);
550         block_group = caching_ctl->block_group;
551         fs_info = block_group->fs_info;
552         extent_root = fs_info->extent_root;
553
554         mutex_lock(&caching_ctl->mutex);
555         down_read(&fs_info->commit_root_sem);
556
557         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
558                 ret = load_free_space_tree(caching_ctl);
559         else
560                 ret = load_extent_tree_free(caching_ctl);
561
562         spin_lock(&block_group->lock);
563         block_group->caching_ctl = NULL;
564         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
565         spin_unlock(&block_group->lock);
566
567 #ifdef CONFIG_BTRFS_DEBUG
568         if (btrfs_should_fragment_free_space(extent_root, block_group)) {
569                 u64 bytes_used;
570
571                 spin_lock(&block_group->space_info->lock);
572                 spin_lock(&block_group->lock);
573                 bytes_used = block_group->key.offset -
574                         btrfs_block_group_used(&block_group->item);
575                 block_group->space_info->bytes_used += bytes_used >> 1;
576                 spin_unlock(&block_group->lock);
577                 spin_unlock(&block_group->space_info->lock);
578                 fragment_free_space(extent_root, block_group);
579         }
580 #endif
581
582         caching_ctl->progress = (u64)-1;
583
584         up_read(&fs_info->commit_root_sem);
585         free_excluded_extents(fs_info->extent_root, block_group);
586         mutex_unlock(&caching_ctl->mutex);
587
588         wake_up(&caching_ctl->wait);
589
590         put_caching_control(caching_ctl);
591         btrfs_put_block_group(block_group);
592 }
593
594 static int cache_block_group(struct btrfs_block_group_cache *cache,
595                              int load_cache_only)
596 {
597         DEFINE_WAIT(wait);
598         struct btrfs_fs_info *fs_info = cache->fs_info;
599         struct btrfs_caching_control *caching_ctl;
600         int ret = 0;
601
602         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
603         if (!caching_ctl)
604                 return -ENOMEM;
605
606         INIT_LIST_HEAD(&caching_ctl->list);
607         mutex_init(&caching_ctl->mutex);
608         init_waitqueue_head(&caching_ctl->wait);
609         caching_ctl->block_group = cache;
610         caching_ctl->progress = cache->key.objectid;
611         atomic_set(&caching_ctl->count, 1);
612         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
613                         caching_thread, NULL, NULL);
614
615         spin_lock(&cache->lock);
616         /*
617          * This should be a rare occasion, but this could happen I think in the
618          * case where one thread starts to load the space cache info, and then
619          * some other thread starts a transaction commit which tries to do an
620          * allocation while the other thread is still loading the space cache
621          * info.  The previous loop should have kept us from choosing this block
622          * group, but if we've moved to the state where we will wait on caching
623          * block groups we need to first check if we're doing a fast load here,
624          * so we can wait for it to finish, otherwise we could end up allocating
625          * from a block group who's cache gets evicted for one reason or
626          * another.
627          */
628         while (cache->cached == BTRFS_CACHE_FAST) {
629                 struct btrfs_caching_control *ctl;
630
631                 ctl = cache->caching_ctl;
632                 atomic_inc(&ctl->count);
633                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
634                 spin_unlock(&cache->lock);
635
636                 schedule();
637
638                 finish_wait(&ctl->wait, &wait);
639                 put_caching_control(ctl);
640                 spin_lock(&cache->lock);
641         }
642
643         if (cache->cached != BTRFS_CACHE_NO) {
644                 spin_unlock(&cache->lock);
645                 kfree(caching_ctl);
646                 return 0;
647         }
648         WARN_ON(cache->caching_ctl);
649         cache->caching_ctl = caching_ctl;
650         cache->cached = BTRFS_CACHE_FAST;
651         spin_unlock(&cache->lock);
652
653         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
654                 mutex_lock(&caching_ctl->mutex);
655                 ret = load_free_space_cache(fs_info, cache);
656
657                 spin_lock(&cache->lock);
658                 if (ret == 1) {
659                         cache->caching_ctl = NULL;
660                         cache->cached = BTRFS_CACHE_FINISHED;
661                         cache->last_byte_to_unpin = (u64)-1;
662                         caching_ctl->progress = (u64)-1;
663                 } else {
664                         if (load_cache_only) {
665                                 cache->caching_ctl = NULL;
666                                 cache->cached = BTRFS_CACHE_NO;
667                         } else {
668                                 cache->cached = BTRFS_CACHE_STARTED;
669                                 cache->has_caching_ctl = 1;
670                         }
671                 }
672                 spin_unlock(&cache->lock);
673 #ifdef CONFIG_BTRFS_DEBUG
674                 if (ret == 1 &&
675                     btrfs_should_fragment_free_space(fs_info->extent_root,
676                                                      cache)) {
677                         u64 bytes_used;
678
679                         spin_lock(&cache->space_info->lock);
680                         spin_lock(&cache->lock);
681                         bytes_used = cache->key.offset -
682                                 btrfs_block_group_used(&cache->item);
683                         cache->space_info->bytes_used += bytes_used >> 1;
684                         spin_unlock(&cache->lock);
685                         spin_unlock(&cache->space_info->lock);
686                         fragment_free_space(fs_info->extent_root, cache);
687                 }
688 #endif
689                 mutex_unlock(&caching_ctl->mutex);
690
691                 wake_up(&caching_ctl->wait);
692                 if (ret == 1) {
693                         put_caching_control(caching_ctl);
694                         free_excluded_extents(fs_info->extent_root, cache);
695                         return 0;
696                 }
697         } else {
698                 /*
699                  * We're either using the free space tree or no caching at all.
700                  * Set cached to the appropriate value and wakeup any waiters.
701                  */
702                 spin_lock(&cache->lock);
703                 if (load_cache_only) {
704                         cache->caching_ctl = NULL;
705                         cache->cached = BTRFS_CACHE_NO;
706                 } else {
707                         cache->cached = BTRFS_CACHE_STARTED;
708                         cache->has_caching_ctl = 1;
709                 }
710                 spin_unlock(&cache->lock);
711                 wake_up(&caching_ctl->wait);
712         }
713
714         if (load_cache_only) {
715                 put_caching_control(caching_ctl);
716                 return 0;
717         }
718
719         down_write(&fs_info->commit_root_sem);
720         atomic_inc(&caching_ctl->count);
721         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
722         up_write(&fs_info->commit_root_sem);
723
724         btrfs_get_block_group(cache);
725
726         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
727
728         return ret;
729 }
730
731 /*
732  * return the block group that starts at or after bytenr
733  */
734 static struct btrfs_block_group_cache *
735 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
736 {
737         struct btrfs_block_group_cache *cache;
738
739         cache = block_group_cache_tree_search(info, bytenr, 0);
740
741         return cache;
742 }
743
744 /*
745  * return the block group that contains the given bytenr
746  */
747 struct btrfs_block_group_cache *btrfs_lookup_block_group(
748                                                  struct btrfs_fs_info *info,
749                                                  u64 bytenr)
750 {
751         struct btrfs_block_group_cache *cache;
752
753         cache = block_group_cache_tree_search(info, bytenr, 1);
754
755         return cache;
756 }
757
758 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
759                                                   u64 flags)
760 {
761         struct list_head *head = &info->space_info;
762         struct btrfs_space_info *found;
763
764         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
765
766         rcu_read_lock();
767         list_for_each_entry_rcu(found, head, list) {
768                 if (found->flags & flags) {
769                         rcu_read_unlock();
770                         return found;
771                 }
772         }
773         rcu_read_unlock();
774         return NULL;
775 }
776
777 /*
778  * after adding space to the filesystem, we need to clear the full flags
779  * on all the space infos.
780  */
781 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
782 {
783         struct list_head *head = &info->space_info;
784         struct btrfs_space_info *found;
785
786         rcu_read_lock();
787         list_for_each_entry_rcu(found, head, list)
788                 found->full = 0;
789         rcu_read_unlock();
790 }
791
792 /* simple helper to search for an existing data extent at a given offset */
793 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
794 {
795         int ret;
796         struct btrfs_key key;
797         struct btrfs_path *path;
798
799         path = btrfs_alloc_path();
800         if (!path)
801                 return -ENOMEM;
802
803         key.objectid = start;
804         key.offset = len;
805         key.type = BTRFS_EXTENT_ITEM_KEY;
806         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
807                                 0, 0);
808         btrfs_free_path(path);
809         return ret;
810 }
811
812 /*
813  * helper function to lookup reference count and flags of a tree block.
814  *
815  * the head node for delayed ref is used to store the sum of all the
816  * reference count modifications queued up in the rbtree. the head
817  * node may also store the extent flags to set. This way you can check
818  * to see what the reference count and extent flags would be if all of
819  * the delayed refs are not processed.
820  */
821 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
822                              struct btrfs_root *root, u64 bytenr,
823                              u64 offset, int metadata, u64 *refs, u64 *flags)
824 {
825         struct btrfs_delayed_ref_head *head;
826         struct btrfs_delayed_ref_root *delayed_refs;
827         struct btrfs_path *path;
828         struct btrfs_extent_item *ei;
829         struct extent_buffer *leaf;
830         struct btrfs_key key;
831         u32 item_size;
832         u64 num_refs;
833         u64 extent_flags;
834         int ret;
835
836         /*
837          * If we don't have skinny metadata, don't bother doing anything
838          * different
839          */
840         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
841                 offset = root->nodesize;
842                 metadata = 0;
843         }
844
845         path = btrfs_alloc_path();
846         if (!path)
847                 return -ENOMEM;
848
849         if (!trans) {
850                 path->skip_locking = 1;
851                 path->search_commit_root = 1;
852         }
853
854 search_again:
855         key.objectid = bytenr;
856         key.offset = offset;
857         if (metadata)
858                 key.type = BTRFS_METADATA_ITEM_KEY;
859         else
860                 key.type = BTRFS_EXTENT_ITEM_KEY;
861
862         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
863                                 &key, path, 0, 0);
864         if (ret < 0)
865                 goto out_free;
866
867         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
868                 if (path->slots[0]) {
869                         path->slots[0]--;
870                         btrfs_item_key_to_cpu(path->nodes[0], &key,
871                                               path->slots[0]);
872                         if (key.objectid == bytenr &&
873                             key.type == BTRFS_EXTENT_ITEM_KEY &&
874                             key.offset == root->nodesize)
875                                 ret = 0;
876                 }
877         }
878
879         if (ret == 0) {
880                 leaf = path->nodes[0];
881                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
882                 if (item_size >= sizeof(*ei)) {
883                         ei = btrfs_item_ptr(leaf, path->slots[0],
884                                             struct btrfs_extent_item);
885                         num_refs = btrfs_extent_refs(leaf, ei);
886                         extent_flags = btrfs_extent_flags(leaf, ei);
887                 } else {
888 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
889                         struct btrfs_extent_item_v0 *ei0;
890                         BUG_ON(item_size != sizeof(*ei0));
891                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
892                                              struct btrfs_extent_item_v0);
893                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
894                         /* FIXME: this isn't correct for data */
895                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
896 #else
897                         BUG();
898 #endif
899                 }
900                 BUG_ON(num_refs == 0);
901         } else {
902                 num_refs = 0;
903                 extent_flags = 0;
904                 ret = 0;
905         }
906
907         if (!trans)
908                 goto out;
909
910         delayed_refs = &trans->transaction->delayed_refs;
911         spin_lock(&delayed_refs->lock);
912         head = btrfs_find_delayed_ref_head(trans, bytenr);
913         if (head) {
914                 if (!mutex_trylock(&head->mutex)) {
915                         atomic_inc(&head->node.refs);
916                         spin_unlock(&delayed_refs->lock);
917
918                         btrfs_release_path(path);
919
920                         /*
921                          * Mutex was contended, block until it's released and try
922                          * again
923                          */
924                         mutex_lock(&head->mutex);
925                         mutex_unlock(&head->mutex);
926                         btrfs_put_delayed_ref(&head->node);
927                         goto search_again;
928                 }
929                 spin_lock(&head->lock);
930                 if (head->extent_op && head->extent_op->update_flags)
931                         extent_flags |= head->extent_op->flags_to_set;
932                 else
933                         BUG_ON(num_refs == 0);
934
935                 num_refs += head->node.ref_mod;
936                 spin_unlock(&head->lock);
937                 mutex_unlock(&head->mutex);
938         }
939         spin_unlock(&delayed_refs->lock);
940 out:
941         WARN_ON(num_refs == 0);
942         if (refs)
943                 *refs = num_refs;
944         if (flags)
945                 *flags = extent_flags;
946 out_free:
947         btrfs_free_path(path);
948         return ret;
949 }
950
951 /*
952  * Back reference rules.  Back refs have three main goals:
953  *
954  * 1) differentiate between all holders of references to an extent so that
955  *    when a reference is dropped we can make sure it was a valid reference
956  *    before freeing the extent.
957  *
958  * 2) Provide enough information to quickly find the holders of an extent
959  *    if we notice a given block is corrupted or bad.
960  *
961  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
962  *    maintenance.  This is actually the same as #2, but with a slightly
963  *    different use case.
964  *
965  * There are two kinds of back refs. The implicit back refs is optimized
966  * for pointers in non-shared tree blocks. For a given pointer in a block,
967  * back refs of this kind provide information about the block's owner tree
968  * and the pointer's key. These information allow us to find the block by
969  * b-tree searching. The full back refs is for pointers in tree blocks not
970  * referenced by their owner trees. The location of tree block is recorded
971  * in the back refs. Actually the full back refs is generic, and can be
972  * used in all cases the implicit back refs is used. The major shortcoming
973  * of the full back refs is its overhead. Every time a tree block gets
974  * COWed, we have to update back refs entry for all pointers in it.
975  *
976  * For a newly allocated tree block, we use implicit back refs for
977  * pointers in it. This means most tree related operations only involve
978  * implicit back refs. For a tree block created in old transaction, the
979  * only way to drop a reference to it is COW it. So we can detect the
980  * event that tree block loses its owner tree's reference and do the
981  * back refs conversion.
982  *
983  * When a tree block is COWed through a tree, there are four cases:
984  *
985  * The reference count of the block is one and the tree is the block's
986  * owner tree. Nothing to do in this case.
987  *
988  * The reference count of the block is one and the tree is not the
989  * block's owner tree. In this case, full back refs is used for pointers
990  * in the block. Remove these full back refs, add implicit back refs for
991  * every pointers in the new block.
992  *
993  * The reference count of the block is greater than one and the tree is
994  * the block's owner tree. In this case, implicit back refs is used for
995  * pointers in the block. Add full back refs for every pointers in the
996  * block, increase lower level extents' reference counts. The original
997  * implicit back refs are entailed to the new block.
998  *
999  * The reference count of the block is greater than one and the tree is
1000  * not the block's owner tree. Add implicit back refs for every pointer in
1001  * the new block, increase lower level extents' reference count.
1002  *
1003  * Back Reference Key composing:
1004  *
1005  * The key objectid corresponds to the first byte in the extent,
1006  * The key type is used to differentiate between types of back refs.
1007  * There are different meanings of the key offset for different types
1008  * of back refs.
1009  *
1010  * File extents can be referenced by:
1011  *
1012  * - multiple snapshots, subvolumes, or different generations in one subvol
1013  * - different files inside a single subvolume
1014  * - different offsets inside a file (bookend extents in file.c)
1015  *
1016  * The extent ref structure for the implicit back refs has fields for:
1017  *
1018  * - Objectid of the subvolume root
1019  * - objectid of the file holding the reference
1020  * - original offset in the file
1021  * - how many bookend extents
1022  *
1023  * The key offset for the implicit back refs is hash of the first
1024  * three fields.
1025  *
1026  * The extent ref structure for the full back refs has field for:
1027  *
1028  * - number of pointers in the tree leaf
1029  *
1030  * The key offset for the implicit back refs is the first byte of
1031  * the tree leaf
1032  *
1033  * When a file extent is allocated, The implicit back refs is used.
1034  * the fields are filled in:
1035  *
1036  *     (root_key.objectid, inode objectid, offset in file, 1)
1037  *
1038  * When a file extent is removed file truncation, we find the
1039  * corresponding implicit back refs and check the following fields:
1040  *
1041  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1042  *
1043  * Btree extents can be referenced by:
1044  *
1045  * - Different subvolumes
1046  *
1047  * Both the implicit back refs and the full back refs for tree blocks
1048  * only consist of key. The key offset for the implicit back refs is
1049  * objectid of block's owner tree. The key offset for the full back refs
1050  * is the first byte of parent block.
1051  *
1052  * When implicit back refs is used, information about the lowest key and
1053  * level of the tree block are required. These information are stored in
1054  * tree block info structure.
1055  */
1056
1057 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1058 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1059                                   struct btrfs_root *root,
1060                                   struct btrfs_path *path,
1061                                   u64 owner, u32 extra_size)
1062 {
1063         struct btrfs_extent_item *item;
1064         struct btrfs_extent_item_v0 *ei0;
1065         struct btrfs_extent_ref_v0 *ref0;
1066         struct btrfs_tree_block_info *bi;
1067         struct extent_buffer *leaf;
1068         struct btrfs_key key;
1069         struct btrfs_key found_key;
1070         u32 new_size = sizeof(*item);
1071         u64 refs;
1072         int ret;
1073
1074         leaf = path->nodes[0];
1075         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1076
1077         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1078         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1079                              struct btrfs_extent_item_v0);
1080         refs = btrfs_extent_refs_v0(leaf, ei0);
1081
1082         if (owner == (u64)-1) {
1083                 while (1) {
1084                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1085                                 ret = btrfs_next_leaf(root, path);
1086                                 if (ret < 0)
1087                                         return ret;
1088                                 BUG_ON(ret > 0); /* Corruption */
1089                                 leaf = path->nodes[0];
1090                         }
1091                         btrfs_item_key_to_cpu(leaf, &found_key,
1092                                               path->slots[0]);
1093                         BUG_ON(key.objectid != found_key.objectid);
1094                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1095                                 path->slots[0]++;
1096                                 continue;
1097                         }
1098                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1099                                               struct btrfs_extent_ref_v0);
1100                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1101                         break;
1102                 }
1103         }
1104         btrfs_release_path(path);
1105
1106         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1107                 new_size += sizeof(*bi);
1108
1109         new_size -= sizeof(*ei0);
1110         ret = btrfs_search_slot(trans, root, &key, path,
1111                                 new_size + extra_size, 1);
1112         if (ret < 0)
1113                 return ret;
1114         BUG_ON(ret); /* Corruption */
1115
1116         btrfs_extend_item(root, path, new_size);
1117
1118         leaf = path->nodes[0];
1119         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1120         btrfs_set_extent_refs(leaf, item, refs);
1121         /* FIXME: get real generation */
1122         btrfs_set_extent_generation(leaf, item, 0);
1123         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1124                 btrfs_set_extent_flags(leaf, item,
1125                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1126                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1127                 bi = (struct btrfs_tree_block_info *)(item + 1);
1128                 /* FIXME: get first key of the block */
1129                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1130                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1131         } else {
1132                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1133         }
1134         btrfs_mark_buffer_dirty(leaf);
1135         return 0;
1136 }
1137 #endif
1138
1139 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1140 {
1141         u32 high_crc = ~(u32)0;
1142         u32 low_crc = ~(u32)0;
1143         __le64 lenum;
1144
1145         lenum = cpu_to_le64(root_objectid);
1146         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1147         lenum = cpu_to_le64(owner);
1148         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1149         lenum = cpu_to_le64(offset);
1150         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1151
1152         return ((u64)high_crc << 31) ^ (u64)low_crc;
1153 }
1154
1155 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1156                                      struct btrfs_extent_data_ref *ref)
1157 {
1158         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1159                                     btrfs_extent_data_ref_objectid(leaf, ref),
1160                                     btrfs_extent_data_ref_offset(leaf, ref));
1161 }
1162
1163 static int match_extent_data_ref(struct extent_buffer *leaf,
1164                                  struct btrfs_extent_data_ref *ref,
1165                                  u64 root_objectid, u64 owner, u64 offset)
1166 {
1167         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1168             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1169             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1170                 return 0;
1171         return 1;
1172 }
1173
1174 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1175                                            struct btrfs_root *root,
1176                                            struct btrfs_path *path,
1177                                            u64 bytenr, u64 parent,
1178                                            u64 root_objectid,
1179                                            u64 owner, u64 offset)
1180 {
1181         struct btrfs_key key;
1182         struct btrfs_extent_data_ref *ref;
1183         struct extent_buffer *leaf;
1184         u32 nritems;
1185         int ret;
1186         int recow;
1187         int err = -ENOENT;
1188
1189         key.objectid = bytenr;
1190         if (parent) {
1191                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1192                 key.offset = parent;
1193         } else {
1194                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1195                 key.offset = hash_extent_data_ref(root_objectid,
1196                                                   owner, offset);
1197         }
1198 again:
1199         recow = 0;
1200         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1201         if (ret < 0) {
1202                 err = ret;
1203                 goto fail;
1204         }
1205
1206         if (parent) {
1207                 if (!ret)
1208                         return 0;
1209 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1210                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1211                 btrfs_release_path(path);
1212                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1213                 if (ret < 0) {
1214                         err = ret;
1215                         goto fail;
1216                 }
1217                 if (!ret)
1218                         return 0;
1219 #endif
1220                 goto fail;
1221         }
1222
1223         leaf = path->nodes[0];
1224         nritems = btrfs_header_nritems(leaf);
1225         while (1) {
1226                 if (path->slots[0] >= nritems) {
1227                         ret = btrfs_next_leaf(root, path);
1228                         if (ret < 0)
1229                                 err = ret;
1230                         if (ret)
1231                                 goto fail;
1232
1233                         leaf = path->nodes[0];
1234                         nritems = btrfs_header_nritems(leaf);
1235                         recow = 1;
1236                 }
1237
1238                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1239                 if (key.objectid != bytenr ||
1240                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1241                         goto fail;
1242
1243                 ref = btrfs_item_ptr(leaf, path->slots[0],
1244                                      struct btrfs_extent_data_ref);
1245
1246                 if (match_extent_data_ref(leaf, ref, root_objectid,
1247                                           owner, offset)) {
1248                         if (recow) {
1249                                 btrfs_release_path(path);
1250                                 goto again;
1251                         }
1252                         err = 0;
1253                         break;
1254                 }
1255                 path->slots[0]++;
1256         }
1257 fail:
1258         return err;
1259 }
1260
1261 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1262                                            struct btrfs_root *root,
1263                                            struct btrfs_path *path,
1264                                            u64 bytenr, u64 parent,
1265                                            u64 root_objectid, u64 owner,
1266                                            u64 offset, int refs_to_add)
1267 {
1268         struct btrfs_key key;
1269         struct extent_buffer *leaf;
1270         u32 size;
1271         u32 num_refs;
1272         int ret;
1273
1274         key.objectid = bytenr;
1275         if (parent) {
1276                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1277                 key.offset = parent;
1278                 size = sizeof(struct btrfs_shared_data_ref);
1279         } else {
1280                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1281                 key.offset = hash_extent_data_ref(root_objectid,
1282                                                   owner, offset);
1283                 size = sizeof(struct btrfs_extent_data_ref);
1284         }
1285
1286         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1287         if (ret && ret != -EEXIST)
1288                 goto fail;
1289
1290         leaf = path->nodes[0];
1291         if (parent) {
1292                 struct btrfs_shared_data_ref *ref;
1293                 ref = btrfs_item_ptr(leaf, path->slots[0],
1294                                      struct btrfs_shared_data_ref);
1295                 if (ret == 0) {
1296                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1297                 } else {
1298                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1299                         num_refs += refs_to_add;
1300                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1301                 }
1302         } else {
1303                 struct btrfs_extent_data_ref *ref;
1304                 while (ret == -EEXIST) {
1305                         ref = btrfs_item_ptr(leaf, path->slots[0],
1306                                              struct btrfs_extent_data_ref);
1307                         if (match_extent_data_ref(leaf, ref, root_objectid,
1308                                                   owner, offset))
1309                                 break;
1310                         btrfs_release_path(path);
1311                         key.offset++;
1312                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1313                                                       size);
1314                         if (ret && ret != -EEXIST)
1315                                 goto fail;
1316
1317                         leaf = path->nodes[0];
1318                 }
1319                 ref = btrfs_item_ptr(leaf, path->slots[0],
1320                                      struct btrfs_extent_data_ref);
1321                 if (ret == 0) {
1322                         btrfs_set_extent_data_ref_root(leaf, ref,
1323                                                        root_objectid);
1324                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1325                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1326                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1327                 } else {
1328                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1329                         num_refs += refs_to_add;
1330                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1331                 }
1332         }
1333         btrfs_mark_buffer_dirty(leaf);
1334         ret = 0;
1335 fail:
1336         btrfs_release_path(path);
1337         return ret;
1338 }
1339
1340 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1341                                            struct btrfs_root *root,
1342                                            struct btrfs_path *path,
1343                                            int refs_to_drop, int *last_ref)
1344 {
1345         struct btrfs_key key;
1346         struct btrfs_extent_data_ref *ref1 = NULL;
1347         struct btrfs_shared_data_ref *ref2 = NULL;
1348         struct extent_buffer *leaf;
1349         u32 num_refs = 0;
1350         int ret = 0;
1351
1352         leaf = path->nodes[0];
1353         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1354
1355         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1356                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1357                                       struct btrfs_extent_data_ref);
1358                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1359         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1360                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1361                                       struct btrfs_shared_data_ref);
1362                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1363 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1364         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1365                 struct btrfs_extent_ref_v0 *ref0;
1366                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1367                                       struct btrfs_extent_ref_v0);
1368                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1369 #endif
1370         } else {
1371                 BUG();
1372         }
1373
1374         BUG_ON(num_refs < refs_to_drop);
1375         num_refs -= refs_to_drop;
1376
1377         if (num_refs == 0) {
1378                 ret = btrfs_del_item(trans, root, path);
1379                 *last_ref = 1;
1380         } else {
1381                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1382                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1383                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1384                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1385 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1386                 else {
1387                         struct btrfs_extent_ref_v0 *ref0;
1388                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1389                                         struct btrfs_extent_ref_v0);
1390                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1391                 }
1392 #endif
1393                 btrfs_mark_buffer_dirty(leaf);
1394         }
1395         return ret;
1396 }
1397
1398 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1399                                           struct btrfs_extent_inline_ref *iref)
1400 {
1401         struct btrfs_key key;
1402         struct extent_buffer *leaf;
1403         struct btrfs_extent_data_ref *ref1;
1404         struct btrfs_shared_data_ref *ref2;
1405         u32 num_refs = 0;
1406
1407         leaf = path->nodes[0];
1408         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1409         if (iref) {
1410                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1411                     BTRFS_EXTENT_DATA_REF_KEY) {
1412                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1413                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1414                 } else {
1415                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1416                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1417                 }
1418         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1419                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1420                                       struct btrfs_extent_data_ref);
1421                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1422         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1423                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1424                                       struct btrfs_shared_data_ref);
1425                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1426 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1427         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1428                 struct btrfs_extent_ref_v0 *ref0;
1429                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1430                                       struct btrfs_extent_ref_v0);
1431                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1432 #endif
1433         } else {
1434                 WARN_ON(1);
1435         }
1436         return num_refs;
1437 }
1438
1439 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1440                                           struct btrfs_root *root,
1441                                           struct btrfs_path *path,
1442                                           u64 bytenr, u64 parent,
1443                                           u64 root_objectid)
1444 {
1445         struct btrfs_key key;
1446         int ret;
1447
1448         key.objectid = bytenr;
1449         if (parent) {
1450                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1451                 key.offset = parent;
1452         } else {
1453                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1454                 key.offset = root_objectid;
1455         }
1456
1457         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1458         if (ret > 0)
1459                 ret = -ENOENT;
1460 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1461         if (ret == -ENOENT && parent) {
1462                 btrfs_release_path(path);
1463                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1464                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1465                 if (ret > 0)
1466                         ret = -ENOENT;
1467         }
1468 #endif
1469         return ret;
1470 }
1471
1472 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1473                                           struct btrfs_root *root,
1474                                           struct btrfs_path *path,
1475                                           u64 bytenr, u64 parent,
1476                                           u64 root_objectid)
1477 {
1478         struct btrfs_key key;
1479         int ret;
1480
1481         key.objectid = bytenr;
1482         if (parent) {
1483                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1484                 key.offset = parent;
1485         } else {
1486                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1487                 key.offset = root_objectid;
1488         }
1489
1490         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1491         btrfs_release_path(path);
1492         return ret;
1493 }
1494
1495 static inline int extent_ref_type(u64 parent, u64 owner)
1496 {
1497         int type;
1498         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1499                 if (parent > 0)
1500                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1501                 else
1502                         type = BTRFS_TREE_BLOCK_REF_KEY;
1503         } else {
1504                 if (parent > 0)
1505                         type = BTRFS_SHARED_DATA_REF_KEY;
1506                 else
1507                         type = BTRFS_EXTENT_DATA_REF_KEY;
1508         }
1509         return type;
1510 }
1511
1512 static int find_next_key(struct btrfs_path *path, int level,
1513                          struct btrfs_key *key)
1514
1515 {
1516         for (; level < BTRFS_MAX_LEVEL; level++) {
1517                 if (!path->nodes[level])
1518                         break;
1519                 if (path->slots[level] + 1 >=
1520                     btrfs_header_nritems(path->nodes[level]))
1521                         continue;
1522                 if (level == 0)
1523                         btrfs_item_key_to_cpu(path->nodes[level], key,
1524                                               path->slots[level] + 1);
1525                 else
1526                         btrfs_node_key_to_cpu(path->nodes[level], key,
1527                                               path->slots[level] + 1);
1528                 return 0;
1529         }
1530         return 1;
1531 }
1532
1533 /*
1534  * look for inline back ref. if back ref is found, *ref_ret is set
1535  * to the address of inline back ref, and 0 is returned.
1536  *
1537  * if back ref isn't found, *ref_ret is set to the address where it
1538  * should be inserted, and -ENOENT is returned.
1539  *
1540  * if insert is true and there are too many inline back refs, the path
1541  * points to the extent item, and -EAGAIN is returned.
1542  *
1543  * NOTE: inline back refs are ordered in the same way that back ref
1544  *       items in the tree are ordered.
1545  */
1546 static noinline_for_stack
1547 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1548                                  struct btrfs_root *root,
1549                                  struct btrfs_path *path,
1550                                  struct btrfs_extent_inline_ref **ref_ret,
1551                                  u64 bytenr, u64 num_bytes,
1552                                  u64 parent, u64 root_objectid,
1553                                  u64 owner, u64 offset, int insert)
1554 {
1555         struct btrfs_key key;
1556         struct extent_buffer *leaf;
1557         struct btrfs_extent_item *ei;
1558         struct btrfs_extent_inline_ref *iref;
1559         u64 flags;
1560         u64 item_size;
1561         unsigned long ptr;
1562         unsigned long end;
1563         int extra_size;
1564         int type;
1565         int want;
1566         int ret;
1567         int err = 0;
1568         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1569                                                  SKINNY_METADATA);
1570
1571         key.objectid = bytenr;
1572         key.type = BTRFS_EXTENT_ITEM_KEY;
1573         key.offset = num_bytes;
1574
1575         want = extent_ref_type(parent, owner);
1576         if (insert) {
1577                 extra_size = btrfs_extent_inline_ref_size(want);
1578                 path->keep_locks = 1;
1579         } else
1580                 extra_size = -1;
1581
1582         /*
1583          * Owner is our parent level, so we can just add one to get the level
1584          * for the block we are interested in.
1585          */
1586         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1587                 key.type = BTRFS_METADATA_ITEM_KEY;
1588                 key.offset = owner;
1589         }
1590
1591 again:
1592         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1593         if (ret < 0) {
1594                 err = ret;
1595                 goto out;
1596         }
1597
1598         /*
1599          * We may be a newly converted file system which still has the old fat
1600          * extent entries for metadata, so try and see if we have one of those.
1601          */
1602         if (ret > 0 && skinny_metadata) {
1603                 skinny_metadata = false;
1604                 if (path->slots[0]) {
1605                         path->slots[0]--;
1606                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1607                                               path->slots[0]);
1608                         if (key.objectid == bytenr &&
1609                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1610                             key.offset == num_bytes)
1611                                 ret = 0;
1612                 }
1613                 if (ret) {
1614                         key.objectid = bytenr;
1615                         key.type = BTRFS_EXTENT_ITEM_KEY;
1616                         key.offset = num_bytes;
1617                         btrfs_release_path(path);
1618                         goto again;
1619                 }
1620         }
1621
1622         if (ret && !insert) {
1623                 err = -ENOENT;
1624                 goto out;
1625         } else if (WARN_ON(ret)) {
1626                 err = -EIO;
1627                 goto out;
1628         }
1629
1630         leaf = path->nodes[0];
1631         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1632 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1633         if (item_size < sizeof(*ei)) {
1634                 if (!insert) {
1635                         err = -ENOENT;
1636                         goto out;
1637                 }
1638                 ret = convert_extent_item_v0(trans, root, path, owner,
1639                                              extra_size);
1640                 if (ret < 0) {
1641                         err = ret;
1642                         goto out;
1643                 }
1644                 leaf = path->nodes[0];
1645                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1646         }
1647 #endif
1648         BUG_ON(item_size < sizeof(*ei));
1649
1650         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1651         flags = btrfs_extent_flags(leaf, ei);
1652
1653         ptr = (unsigned long)(ei + 1);
1654         end = (unsigned long)ei + item_size;
1655
1656         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1657                 ptr += sizeof(struct btrfs_tree_block_info);
1658                 BUG_ON(ptr > end);
1659         }
1660
1661         err = -ENOENT;
1662         while (1) {
1663                 if (ptr >= end) {
1664                         WARN_ON(ptr > end);
1665                         break;
1666                 }
1667                 iref = (struct btrfs_extent_inline_ref *)ptr;
1668                 type = btrfs_extent_inline_ref_type(leaf, iref);
1669                 if (want < type)
1670                         break;
1671                 if (want > type) {
1672                         ptr += btrfs_extent_inline_ref_size(type);
1673                         continue;
1674                 }
1675
1676                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1677                         struct btrfs_extent_data_ref *dref;
1678                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1679                         if (match_extent_data_ref(leaf, dref, root_objectid,
1680                                                   owner, offset)) {
1681                                 err = 0;
1682                                 break;
1683                         }
1684                         if (hash_extent_data_ref_item(leaf, dref) <
1685                             hash_extent_data_ref(root_objectid, owner, offset))
1686                                 break;
1687                 } else {
1688                         u64 ref_offset;
1689                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1690                         if (parent > 0) {
1691                                 if (parent == ref_offset) {
1692                                         err = 0;
1693                                         break;
1694                                 }
1695                                 if (ref_offset < parent)
1696                                         break;
1697                         } else {
1698                                 if (root_objectid == ref_offset) {
1699                                         err = 0;
1700                                         break;
1701                                 }
1702                                 if (ref_offset < root_objectid)
1703                                         break;
1704                         }
1705                 }
1706                 ptr += btrfs_extent_inline_ref_size(type);
1707         }
1708         if (err == -ENOENT && insert) {
1709                 if (item_size + extra_size >=
1710                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1711                         err = -EAGAIN;
1712                         goto out;
1713                 }
1714                 /*
1715                  * To add new inline back ref, we have to make sure
1716                  * there is no corresponding back ref item.
1717                  * For simplicity, we just do not add new inline back
1718                  * ref if there is any kind of item for this block
1719                  */
1720                 if (find_next_key(path, 0, &key) == 0 &&
1721                     key.objectid == bytenr &&
1722                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1723                         err = -EAGAIN;
1724                         goto out;
1725                 }
1726         }
1727         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1728 out:
1729         if (insert) {
1730                 path->keep_locks = 0;
1731                 btrfs_unlock_up_safe(path, 1);
1732         }
1733         return err;
1734 }
1735
1736 /*
1737  * helper to add new inline back ref
1738  */
1739 static noinline_for_stack
1740 void setup_inline_extent_backref(struct btrfs_root *root,
1741                                  struct btrfs_path *path,
1742                                  struct btrfs_extent_inline_ref *iref,
1743                                  u64 parent, u64 root_objectid,
1744                                  u64 owner, u64 offset, int refs_to_add,
1745                                  struct btrfs_delayed_extent_op *extent_op)
1746 {
1747         struct extent_buffer *leaf;
1748         struct btrfs_extent_item *ei;
1749         unsigned long ptr;
1750         unsigned long end;
1751         unsigned long item_offset;
1752         u64 refs;
1753         int size;
1754         int type;
1755
1756         leaf = path->nodes[0];
1757         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1758         item_offset = (unsigned long)iref - (unsigned long)ei;
1759
1760         type = extent_ref_type(parent, owner);
1761         size = btrfs_extent_inline_ref_size(type);
1762
1763         btrfs_extend_item(root, path, size);
1764
1765         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1766         refs = btrfs_extent_refs(leaf, ei);
1767         refs += refs_to_add;
1768         btrfs_set_extent_refs(leaf, ei, refs);
1769         if (extent_op)
1770                 __run_delayed_extent_op(extent_op, leaf, ei);
1771
1772         ptr = (unsigned long)ei + item_offset;
1773         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1774         if (ptr < end - size)
1775                 memmove_extent_buffer(leaf, ptr + size, ptr,
1776                                       end - size - ptr);
1777
1778         iref = (struct btrfs_extent_inline_ref *)ptr;
1779         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1780         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1781                 struct btrfs_extent_data_ref *dref;
1782                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1783                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1784                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1785                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1786                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1787         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788                 struct btrfs_shared_data_ref *sref;
1789                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1790                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1791                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1792         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1793                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1794         } else {
1795                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1796         }
1797         btrfs_mark_buffer_dirty(leaf);
1798 }
1799
1800 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1801                                  struct btrfs_root *root,
1802                                  struct btrfs_path *path,
1803                                  struct btrfs_extent_inline_ref **ref_ret,
1804                                  u64 bytenr, u64 num_bytes, u64 parent,
1805                                  u64 root_objectid, u64 owner, u64 offset)
1806 {
1807         int ret;
1808
1809         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1810                                            bytenr, num_bytes, parent,
1811                                            root_objectid, owner, offset, 0);
1812         if (ret != -ENOENT)
1813                 return ret;
1814
1815         btrfs_release_path(path);
1816         *ref_ret = NULL;
1817
1818         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1819                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1820                                             root_objectid);
1821         } else {
1822                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1823                                              root_objectid, owner, offset);
1824         }
1825         return ret;
1826 }
1827
1828 /*
1829  * helper to update/remove inline back ref
1830  */
1831 static noinline_for_stack
1832 void update_inline_extent_backref(struct btrfs_root *root,
1833                                   struct btrfs_path *path,
1834                                   struct btrfs_extent_inline_ref *iref,
1835                                   int refs_to_mod,
1836                                   struct btrfs_delayed_extent_op *extent_op,
1837                                   int *last_ref)
1838 {
1839         struct extent_buffer *leaf;
1840         struct btrfs_extent_item *ei;
1841         struct btrfs_extent_data_ref *dref = NULL;
1842         struct btrfs_shared_data_ref *sref = NULL;
1843         unsigned long ptr;
1844         unsigned long end;
1845         u32 item_size;
1846         int size;
1847         int type;
1848         u64 refs;
1849
1850         leaf = path->nodes[0];
1851         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1852         refs = btrfs_extent_refs(leaf, ei);
1853         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1854         refs += refs_to_mod;
1855         btrfs_set_extent_refs(leaf, ei, refs);
1856         if (extent_op)
1857                 __run_delayed_extent_op(extent_op, leaf, ei);
1858
1859         type = btrfs_extent_inline_ref_type(leaf, iref);
1860
1861         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1862                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1863                 refs = btrfs_extent_data_ref_count(leaf, dref);
1864         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1865                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1866                 refs = btrfs_shared_data_ref_count(leaf, sref);
1867         } else {
1868                 refs = 1;
1869                 BUG_ON(refs_to_mod != -1);
1870         }
1871
1872         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1873         refs += refs_to_mod;
1874
1875         if (refs > 0) {
1876                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1877                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1878                 else
1879                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1880         } else {
1881                 *last_ref = 1;
1882                 size =  btrfs_extent_inline_ref_size(type);
1883                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1884                 ptr = (unsigned long)iref;
1885                 end = (unsigned long)ei + item_size;
1886                 if (ptr + size < end)
1887                         memmove_extent_buffer(leaf, ptr, ptr + size,
1888                                               end - ptr - size);
1889                 item_size -= size;
1890                 btrfs_truncate_item(root, path, item_size, 1);
1891         }
1892         btrfs_mark_buffer_dirty(leaf);
1893 }
1894
1895 static noinline_for_stack
1896 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1897                                  struct btrfs_root *root,
1898                                  struct btrfs_path *path,
1899                                  u64 bytenr, u64 num_bytes, u64 parent,
1900                                  u64 root_objectid, u64 owner,
1901                                  u64 offset, int refs_to_add,
1902                                  struct btrfs_delayed_extent_op *extent_op)
1903 {
1904         struct btrfs_extent_inline_ref *iref;
1905         int ret;
1906
1907         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1908                                            bytenr, num_bytes, parent,
1909                                            root_objectid, owner, offset, 1);
1910         if (ret == 0) {
1911                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1912                 update_inline_extent_backref(root, path, iref,
1913                                              refs_to_add, extent_op, NULL);
1914         } else if (ret == -ENOENT) {
1915                 setup_inline_extent_backref(root, path, iref, parent,
1916                                             root_objectid, owner, offset,
1917                                             refs_to_add, extent_op);
1918                 ret = 0;
1919         }
1920         return ret;
1921 }
1922
1923 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1924                                  struct btrfs_root *root,
1925                                  struct btrfs_path *path,
1926                                  u64 bytenr, u64 parent, u64 root_objectid,
1927                                  u64 owner, u64 offset, int refs_to_add)
1928 {
1929         int ret;
1930         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1931                 BUG_ON(refs_to_add != 1);
1932                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1933                                             parent, root_objectid);
1934         } else {
1935                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1936                                              parent, root_objectid,
1937                                              owner, offset, refs_to_add);
1938         }
1939         return ret;
1940 }
1941
1942 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1943                                  struct btrfs_root *root,
1944                                  struct btrfs_path *path,
1945                                  struct btrfs_extent_inline_ref *iref,
1946                                  int refs_to_drop, int is_data, int *last_ref)
1947 {
1948         int ret = 0;
1949
1950         BUG_ON(!is_data && refs_to_drop != 1);
1951         if (iref) {
1952                 update_inline_extent_backref(root, path, iref,
1953                                              -refs_to_drop, NULL, last_ref);
1954         } else if (is_data) {
1955                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1956                                              last_ref);
1957         } else {
1958                 *last_ref = 1;
1959                 ret = btrfs_del_item(trans, root, path);
1960         }
1961         return ret;
1962 }
1963
1964 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1965 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1966                                u64 *discarded_bytes)
1967 {
1968         int j, ret = 0;
1969         u64 bytes_left, end;
1970         u64 aligned_start = ALIGN(start, 1 << 9);
1971
1972         if (WARN_ON(start != aligned_start)) {
1973                 len -= aligned_start - start;
1974                 len = round_down(len, 1 << 9);
1975                 start = aligned_start;
1976         }
1977
1978         *discarded_bytes = 0;
1979
1980         if (!len)
1981                 return 0;
1982
1983         end = start + len;
1984         bytes_left = len;
1985
1986         /* Skip any superblocks on this device. */
1987         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1988                 u64 sb_start = btrfs_sb_offset(j);
1989                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1990                 u64 size = sb_start - start;
1991
1992                 if (!in_range(sb_start, start, bytes_left) &&
1993                     !in_range(sb_end, start, bytes_left) &&
1994                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1995                         continue;
1996
1997                 /*
1998                  * Superblock spans beginning of range.  Adjust start and
1999                  * try again.
2000                  */
2001                 if (sb_start <= start) {
2002                         start += sb_end - start;
2003                         if (start > end) {
2004                                 bytes_left = 0;
2005                                 break;
2006                         }
2007                         bytes_left = end - start;
2008                         continue;
2009                 }
2010
2011                 if (size) {
2012                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2013                                                    GFP_NOFS, 0);
2014                         if (!ret)
2015                                 *discarded_bytes += size;
2016                         else if (ret != -EOPNOTSUPP)
2017                                 return ret;
2018                 }
2019
2020                 start = sb_end;
2021                 if (start > end) {
2022                         bytes_left = 0;
2023                         break;
2024                 }
2025                 bytes_left = end - start;
2026         }
2027
2028         if (bytes_left) {
2029                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2030                                            GFP_NOFS, 0);
2031                 if (!ret)
2032                         *discarded_bytes += bytes_left;
2033         }
2034         return ret;
2035 }
2036
2037 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2038                          u64 num_bytes, u64 *actual_bytes)
2039 {
2040         int ret;
2041         u64 discarded_bytes = 0;
2042         struct btrfs_bio *bbio = NULL;
2043
2044
2045         /* Tell the block device(s) that the sectors can be discarded */
2046         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2047                               bytenr, &num_bytes, &bbio, 0);
2048         /* Error condition is -ENOMEM */
2049         if (!ret) {
2050                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2051                 int i;
2052
2053
2054                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2055                         u64 bytes;
2056                         if (!stripe->dev->can_discard)
2057                                 continue;
2058
2059                         ret = btrfs_issue_discard(stripe->dev->bdev,
2060                                                   stripe->physical,
2061                                                   stripe->length,
2062                                                   &bytes);
2063                         if (!ret)
2064                                 discarded_bytes += bytes;
2065                         else if (ret != -EOPNOTSUPP)
2066                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2067
2068                         /*
2069                          * Just in case we get back EOPNOTSUPP for some reason,
2070                          * just ignore the return value so we don't screw up
2071                          * people calling discard_extent.
2072                          */
2073                         ret = 0;
2074                 }
2075                 btrfs_put_bbio(bbio);
2076         }
2077
2078         if (actual_bytes)
2079                 *actual_bytes = discarded_bytes;
2080
2081
2082         if (ret == -EOPNOTSUPP)
2083                 ret = 0;
2084         return ret;
2085 }
2086
2087 /* Can return -ENOMEM */
2088 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2089                          struct btrfs_root *root,
2090                          u64 bytenr, u64 num_bytes, u64 parent,
2091                          u64 root_objectid, u64 owner, u64 offset)
2092 {
2093         int ret;
2094         struct btrfs_fs_info *fs_info = root->fs_info;
2095
2096         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2097                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2098
2099         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2100                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2101                                         num_bytes,
2102                                         parent, root_objectid, (int)owner,
2103                                         BTRFS_ADD_DELAYED_REF, NULL);
2104         } else {
2105                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2106                                         num_bytes, parent, root_objectid,
2107                                         owner, offset, 0,
2108                                         BTRFS_ADD_DELAYED_REF, NULL);
2109         }
2110         return ret;
2111 }
2112
2113 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2114                                   struct btrfs_root *root,
2115                                   struct btrfs_delayed_ref_node *node,
2116                                   u64 parent, u64 root_objectid,
2117                                   u64 owner, u64 offset, int refs_to_add,
2118                                   struct btrfs_delayed_extent_op *extent_op)
2119 {
2120         struct btrfs_fs_info *fs_info = root->fs_info;
2121         struct btrfs_path *path;
2122         struct extent_buffer *leaf;
2123         struct btrfs_extent_item *item;
2124         struct btrfs_key key;
2125         u64 bytenr = node->bytenr;
2126         u64 num_bytes = node->num_bytes;
2127         u64 refs;
2128         int ret;
2129
2130         path = btrfs_alloc_path();
2131         if (!path)
2132                 return -ENOMEM;
2133
2134         path->reada = READA_FORWARD;
2135         path->leave_spinning = 1;
2136         /* this will setup the path even if it fails to insert the back ref */
2137         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2138                                            bytenr, num_bytes, parent,
2139                                            root_objectid, owner, offset,
2140                                            refs_to_add, extent_op);
2141         if ((ret < 0 && ret != -EAGAIN) || !ret)
2142                 goto out;
2143
2144         /*
2145          * Ok we had -EAGAIN which means we didn't have space to insert and
2146          * inline extent ref, so just update the reference count and add a
2147          * normal backref.
2148          */
2149         leaf = path->nodes[0];
2150         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2151         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2152         refs = btrfs_extent_refs(leaf, item);
2153         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2154         if (extent_op)
2155                 __run_delayed_extent_op(extent_op, leaf, item);
2156
2157         btrfs_mark_buffer_dirty(leaf);
2158         btrfs_release_path(path);
2159
2160         path->reada = READA_FORWARD;
2161         path->leave_spinning = 1;
2162         /* now insert the actual backref */
2163         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2164                                     path, bytenr, parent, root_objectid,
2165                                     owner, offset, refs_to_add);
2166         if (ret)
2167                 btrfs_abort_transaction(trans, root, ret);
2168 out:
2169         btrfs_free_path(path);
2170         return ret;
2171 }
2172
2173 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2174                                 struct btrfs_root *root,
2175                                 struct btrfs_delayed_ref_node *node,
2176                                 struct btrfs_delayed_extent_op *extent_op,
2177                                 int insert_reserved)
2178 {
2179         int ret = 0;
2180         struct btrfs_delayed_data_ref *ref;
2181         struct btrfs_key ins;
2182         u64 parent = 0;
2183         u64 ref_root = 0;
2184         u64 flags = 0;
2185
2186         ins.objectid = node->bytenr;
2187         ins.offset = node->num_bytes;
2188         ins.type = BTRFS_EXTENT_ITEM_KEY;
2189
2190         ref = btrfs_delayed_node_to_data_ref(node);
2191         trace_run_delayed_data_ref(node, ref, node->action);
2192
2193         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2194                 parent = ref->parent;
2195         ref_root = ref->root;
2196
2197         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2198                 if (extent_op)
2199                         flags |= extent_op->flags_to_set;
2200                 ret = alloc_reserved_file_extent(trans, root,
2201                                                  parent, ref_root, flags,
2202                                                  ref->objectid, ref->offset,
2203                                                  &ins, node->ref_mod);
2204         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2205                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2206                                              ref_root, ref->objectid,
2207                                              ref->offset, node->ref_mod,
2208                                              extent_op);
2209         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2210                 ret = __btrfs_free_extent(trans, root, node, parent,
2211                                           ref_root, ref->objectid,
2212                                           ref->offset, node->ref_mod,
2213                                           extent_op);
2214         } else {
2215                 BUG();
2216         }
2217         return ret;
2218 }
2219
2220 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2221                                     struct extent_buffer *leaf,
2222                                     struct btrfs_extent_item *ei)
2223 {
2224         u64 flags = btrfs_extent_flags(leaf, ei);
2225         if (extent_op->update_flags) {
2226                 flags |= extent_op->flags_to_set;
2227                 btrfs_set_extent_flags(leaf, ei, flags);
2228         }
2229
2230         if (extent_op->update_key) {
2231                 struct btrfs_tree_block_info *bi;
2232                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2233                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2234                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2235         }
2236 }
2237
2238 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2239                                  struct btrfs_root *root,
2240                                  struct btrfs_delayed_ref_node *node,
2241                                  struct btrfs_delayed_extent_op *extent_op)
2242 {
2243         struct btrfs_key key;
2244         struct btrfs_path *path;
2245         struct btrfs_extent_item *ei;
2246         struct extent_buffer *leaf;
2247         u32 item_size;
2248         int ret;
2249         int err = 0;
2250         int metadata = !extent_op->is_data;
2251
2252         if (trans->aborted)
2253                 return 0;
2254
2255         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2256                 metadata = 0;
2257
2258         path = btrfs_alloc_path();
2259         if (!path)
2260                 return -ENOMEM;
2261
2262         key.objectid = node->bytenr;
2263
2264         if (metadata) {
2265                 key.type = BTRFS_METADATA_ITEM_KEY;
2266                 key.offset = extent_op->level;
2267         } else {
2268                 key.type = BTRFS_EXTENT_ITEM_KEY;
2269                 key.offset = node->num_bytes;
2270         }
2271
2272 again:
2273         path->reada = READA_FORWARD;
2274         path->leave_spinning = 1;
2275         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2276                                 path, 0, 1);
2277         if (ret < 0) {
2278                 err = ret;
2279                 goto out;
2280         }
2281         if (ret > 0) {
2282                 if (metadata) {
2283                         if (path->slots[0] > 0) {
2284                                 path->slots[0]--;
2285                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2286                                                       path->slots[0]);
2287                                 if (key.objectid == node->bytenr &&
2288                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2289                                     key.offset == node->num_bytes)
2290                                         ret = 0;
2291                         }
2292                         if (ret > 0) {
2293                                 btrfs_release_path(path);
2294                                 metadata = 0;
2295
2296                                 key.objectid = node->bytenr;
2297                                 key.offset = node->num_bytes;
2298                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2299                                 goto again;
2300                         }
2301                 } else {
2302                         err = -EIO;
2303                         goto out;
2304                 }
2305         }
2306
2307         leaf = path->nodes[0];
2308         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2310         if (item_size < sizeof(*ei)) {
2311                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2312                                              path, (u64)-1, 0);
2313                 if (ret < 0) {
2314                         err = ret;
2315                         goto out;
2316                 }
2317                 leaf = path->nodes[0];
2318                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2319         }
2320 #endif
2321         BUG_ON(item_size < sizeof(*ei));
2322         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2323         __run_delayed_extent_op(extent_op, leaf, ei);
2324
2325         btrfs_mark_buffer_dirty(leaf);
2326 out:
2327         btrfs_free_path(path);
2328         return err;
2329 }
2330
2331 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2332                                 struct btrfs_root *root,
2333                                 struct btrfs_delayed_ref_node *node,
2334                                 struct btrfs_delayed_extent_op *extent_op,
2335                                 int insert_reserved)
2336 {
2337         int ret = 0;
2338         struct btrfs_delayed_tree_ref *ref;
2339         struct btrfs_key ins;
2340         u64 parent = 0;
2341         u64 ref_root = 0;
2342         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2343                                                  SKINNY_METADATA);
2344
2345         ref = btrfs_delayed_node_to_tree_ref(node);
2346         trace_run_delayed_tree_ref(node, ref, node->action);
2347
2348         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2349                 parent = ref->parent;
2350         ref_root = ref->root;
2351
2352         ins.objectid = node->bytenr;
2353         if (skinny_metadata) {
2354                 ins.offset = ref->level;
2355                 ins.type = BTRFS_METADATA_ITEM_KEY;
2356         } else {
2357                 ins.offset = node->num_bytes;
2358                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2359         }
2360
2361         BUG_ON(node->ref_mod != 1);
2362         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2363                 BUG_ON(!extent_op || !extent_op->update_flags);
2364                 ret = alloc_reserved_tree_block(trans, root,
2365                                                 parent, ref_root,
2366                                                 extent_op->flags_to_set,
2367                                                 &extent_op->key,
2368                                                 ref->level, &ins);
2369         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2370                 ret = __btrfs_inc_extent_ref(trans, root, node,
2371                                              parent, ref_root,
2372                                              ref->level, 0, 1,
2373                                              extent_op);
2374         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2375                 ret = __btrfs_free_extent(trans, root, node,
2376                                           parent, ref_root,
2377                                           ref->level, 0, 1, extent_op);
2378         } else {
2379                 BUG();
2380         }
2381         return ret;
2382 }
2383
2384 /* helper function to actually process a single delayed ref entry */
2385 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2386                                struct btrfs_root *root,
2387                                struct btrfs_delayed_ref_node *node,
2388                                struct btrfs_delayed_extent_op *extent_op,
2389                                int insert_reserved)
2390 {
2391         int ret = 0;
2392
2393         if (trans->aborted) {
2394                 if (insert_reserved)
2395                         btrfs_pin_extent(root, node->bytenr,
2396                                          node->num_bytes, 1);
2397                 return 0;
2398         }
2399
2400         if (btrfs_delayed_ref_is_head(node)) {
2401                 struct btrfs_delayed_ref_head *head;
2402                 /*
2403                  * we've hit the end of the chain and we were supposed
2404                  * to insert this extent into the tree.  But, it got
2405                  * deleted before we ever needed to insert it, so all
2406                  * we have to do is clean up the accounting
2407                  */
2408                 BUG_ON(extent_op);
2409                 head = btrfs_delayed_node_to_head(node);
2410                 trace_run_delayed_ref_head(node, head, node->action);
2411
2412                 if (insert_reserved) {
2413                         btrfs_pin_extent(root, node->bytenr,
2414                                          node->num_bytes, 1);
2415                         if (head->is_data) {
2416                                 ret = btrfs_del_csums(trans, root,
2417                                                       node->bytenr,
2418                                                       node->num_bytes);
2419                         }
2420                 }
2421
2422                 /* Also free its reserved qgroup space */
2423                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2424                                               head->qgroup_ref_root,
2425                                               head->qgroup_reserved);
2426                 return ret;
2427         }
2428
2429         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2430             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2431                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2432                                            insert_reserved);
2433         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2434                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2435                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2436                                            insert_reserved);
2437         else
2438                 BUG();
2439         return ret;
2440 }
2441
2442 static inline struct btrfs_delayed_ref_node *
2443 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2444 {
2445         struct btrfs_delayed_ref_node *ref;
2446
2447         if (list_empty(&head->ref_list))
2448                 return NULL;
2449
2450         /*
2451          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2452          * This is to prevent a ref count from going down to zero, which deletes
2453          * the extent item from the extent tree, when there still are references
2454          * to add, which would fail because they would not find the extent item.
2455          */
2456         list_for_each_entry(ref, &head->ref_list, list) {
2457                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2458                         return ref;
2459         }
2460
2461         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2462                           list);
2463 }
2464
2465 /*
2466  * Returns 0 on success or if called with an already aborted transaction.
2467  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2468  */
2469 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2470                                              struct btrfs_root *root,
2471                                              unsigned long nr)
2472 {
2473         struct btrfs_delayed_ref_root *delayed_refs;
2474         struct btrfs_delayed_ref_node *ref;
2475         struct btrfs_delayed_ref_head *locked_ref = NULL;
2476         struct btrfs_delayed_extent_op *extent_op;
2477         struct btrfs_fs_info *fs_info = root->fs_info;
2478         ktime_t start = ktime_get();
2479         int ret;
2480         unsigned long count = 0;
2481         unsigned long actual_count = 0;
2482         int must_insert_reserved = 0;
2483
2484         delayed_refs = &trans->transaction->delayed_refs;
2485         while (1) {
2486                 if (!locked_ref) {
2487                         if (count >= nr)
2488                                 break;
2489
2490                         spin_lock(&delayed_refs->lock);
2491                         locked_ref = btrfs_select_ref_head(trans);
2492                         if (!locked_ref) {
2493                                 spin_unlock(&delayed_refs->lock);
2494                                 break;
2495                         }
2496
2497                         /* grab the lock that says we are going to process
2498                          * all the refs for this head */
2499                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2500                         spin_unlock(&delayed_refs->lock);
2501                         /*
2502                          * we may have dropped the spin lock to get the head
2503                          * mutex lock, and that might have given someone else
2504                          * time to free the head.  If that's true, it has been
2505                          * removed from our list and we can move on.
2506                          */
2507                         if (ret == -EAGAIN) {
2508                                 locked_ref = NULL;
2509                                 count++;
2510                                 continue;
2511                         }
2512                 }
2513
2514                 /*
2515                  * We need to try and merge add/drops of the same ref since we
2516                  * can run into issues with relocate dropping the implicit ref
2517                  * and then it being added back again before the drop can
2518                  * finish.  If we merged anything we need to re-loop so we can
2519                  * get a good ref.
2520                  * Or we can get node references of the same type that weren't
2521                  * merged when created due to bumps in the tree mod seq, and
2522                  * we need to merge them to prevent adding an inline extent
2523                  * backref before dropping it (triggering a BUG_ON at
2524                  * insert_inline_extent_backref()).
2525                  */
2526                 spin_lock(&locked_ref->lock);
2527                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2528                                          locked_ref);
2529
2530                 /*
2531                  * locked_ref is the head node, so we have to go one
2532                  * node back for any delayed ref updates
2533                  */
2534                 ref = select_delayed_ref(locked_ref);
2535
2536                 if (ref && ref->seq &&
2537                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2538                         spin_unlock(&locked_ref->lock);
2539                         btrfs_delayed_ref_unlock(locked_ref);
2540                         spin_lock(&delayed_refs->lock);
2541                         locked_ref->processing = 0;
2542                         delayed_refs->num_heads_ready++;
2543                         spin_unlock(&delayed_refs->lock);
2544                         locked_ref = NULL;
2545                         cond_resched();
2546                         count++;
2547                         continue;
2548                 }
2549
2550                 /*
2551                  * record the must insert reserved flag before we
2552                  * drop the spin lock.
2553                  */
2554                 must_insert_reserved = locked_ref->must_insert_reserved;
2555                 locked_ref->must_insert_reserved = 0;
2556
2557                 extent_op = locked_ref->extent_op;
2558                 locked_ref->extent_op = NULL;
2559
2560                 if (!ref) {
2561
2562
2563                         /* All delayed refs have been processed, Go ahead
2564                          * and send the head node to run_one_delayed_ref,
2565                          * so that any accounting fixes can happen
2566                          */
2567                         ref = &locked_ref->node;
2568
2569                         if (extent_op && must_insert_reserved) {
2570                                 btrfs_free_delayed_extent_op(extent_op);
2571                                 extent_op = NULL;
2572                         }
2573
2574                         if (extent_op) {
2575                                 spin_unlock(&locked_ref->lock);
2576                                 ret = run_delayed_extent_op(trans, root,
2577                                                             ref, extent_op);
2578                                 btrfs_free_delayed_extent_op(extent_op);
2579
2580                                 if (ret) {
2581                                         /*
2582                                          * Need to reset must_insert_reserved if
2583                                          * there was an error so the abort stuff
2584                                          * can cleanup the reserved space
2585                                          * properly.
2586                                          */
2587                                         if (must_insert_reserved)
2588                                                 locked_ref->must_insert_reserved = 1;
2589                                         locked_ref->processing = 0;
2590                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2591                                         btrfs_delayed_ref_unlock(locked_ref);
2592                                         return ret;
2593                                 }
2594                                 continue;
2595                         }
2596
2597                         /*
2598                          * Need to drop our head ref lock and re-acquire the
2599                          * delayed ref lock and then re-check to make sure
2600                          * nobody got added.
2601                          */
2602                         spin_unlock(&locked_ref->lock);
2603                         spin_lock(&delayed_refs->lock);
2604                         spin_lock(&locked_ref->lock);
2605                         if (!list_empty(&locked_ref->ref_list) ||
2606                             locked_ref->extent_op) {
2607                                 spin_unlock(&locked_ref->lock);
2608                                 spin_unlock(&delayed_refs->lock);
2609                                 continue;
2610                         }
2611                         ref->in_tree = 0;
2612                         delayed_refs->num_heads--;
2613                         rb_erase(&locked_ref->href_node,
2614                                  &delayed_refs->href_root);
2615                         spin_unlock(&delayed_refs->lock);
2616                 } else {
2617                         actual_count++;
2618                         ref->in_tree = 0;
2619                         list_del(&ref->list);
2620                 }
2621                 atomic_dec(&delayed_refs->num_entries);
2622
2623                 if (!btrfs_delayed_ref_is_head(ref)) {
2624                         /*
2625                          * when we play the delayed ref, also correct the
2626                          * ref_mod on head
2627                          */
2628                         switch (ref->action) {
2629                         case BTRFS_ADD_DELAYED_REF:
2630                         case BTRFS_ADD_DELAYED_EXTENT:
2631                                 locked_ref->node.ref_mod -= ref->ref_mod;
2632                                 break;
2633                         case BTRFS_DROP_DELAYED_REF:
2634                                 locked_ref->node.ref_mod += ref->ref_mod;
2635                                 break;
2636                         default:
2637                                 WARN_ON(1);
2638                         }
2639                 }
2640                 spin_unlock(&locked_ref->lock);
2641
2642                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2643                                           must_insert_reserved);
2644
2645                 btrfs_free_delayed_extent_op(extent_op);
2646                 if (ret) {
2647                         locked_ref->processing = 0;
2648                         btrfs_delayed_ref_unlock(locked_ref);
2649                         btrfs_put_delayed_ref(ref);
2650                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2651                         return ret;
2652                 }
2653
2654                 /*
2655                  * If this node is a head, that means all the refs in this head
2656                  * have been dealt with, and we will pick the next head to deal
2657                  * with, so we must unlock the head and drop it from the cluster
2658                  * list before we release it.
2659                  */
2660                 if (btrfs_delayed_ref_is_head(ref)) {
2661                         if (locked_ref->is_data &&
2662                             locked_ref->total_ref_mod < 0) {
2663                                 spin_lock(&delayed_refs->lock);
2664                                 delayed_refs->pending_csums -= ref->num_bytes;
2665                                 spin_unlock(&delayed_refs->lock);
2666                         }
2667                         btrfs_delayed_ref_unlock(locked_ref);
2668                         locked_ref = NULL;
2669                 }
2670                 btrfs_put_delayed_ref(ref);
2671                 count++;
2672                 cond_resched();
2673         }
2674
2675         /*
2676          * We don't want to include ref heads since we can have empty ref heads
2677          * and those will drastically skew our runtime down since we just do
2678          * accounting, no actual extent tree updates.
2679          */
2680         if (actual_count > 0) {
2681                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2682                 u64 avg;
2683
2684                 /*
2685                  * We weigh the current average higher than our current runtime
2686                  * to avoid large swings in the average.
2687                  */
2688                 spin_lock(&delayed_refs->lock);
2689                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2690                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2691                 spin_unlock(&delayed_refs->lock);
2692         }
2693         return 0;
2694 }
2695
2696 #ifdef SCRAMBLE_DELAYED_REFS
2697 /*
2698  * Normally delayed refs get processed in ascending bytenr order. This
2699  * correlates in most cases to the order added. To expose dependencies on this
2700  * order, we start to process the tree in the middle instead of the beginning
2701  */
2702 static u64 find_middle(struct rb_root *root)
2703 {
2704         struct rb_node *n = root->rb_node;
2705         struct btrfs_delayed_ref_node *entry;
2706         int alt = 1;
2707         u64 middle;
2708         u64 first = 0, last = 0;
2709
2710         n = rb_first(root);
2711         if (n) {
2712                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2713                 first = entry->bytenr;
2714         }
2715         n = rb_last(root);
2716         if (n) {
2717                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2718                 last = entry->bytenr;
2719         }
2720         n = root->rb_node;
2721
2722         while (n) {
2723                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2724                 WARN_ON(!entry->in_tree);
2725
2726                 middle = entry->bytenr;
2727
2728                 if (alt)
2729                         n = n->rb_left;
2730                 else
2731                         n = n->rb_right;
2732
2733                 alt = 1 - alt;
2734         }
2735         return middle;
2736 }
2737 #endif
2738
2739 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2740 {
2741         u64 num_bytes;
2742
2743         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2744                              sizeof(struct btrfs_extent_inline_ref));
2745         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2746                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2747
2748         /*
2749          * We don't ever fill up leaves all the way so multiply by 2 just to be
2750          * closer to what we're really going to want to use.
2751          */
2752         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2753 }
2754
2755 /*
2756  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2757  * would require to store the csums for that many bytes.
2758  */
2759 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2760 {
2761         u64 csum_size;
2762         u64 num_csums_per_leaf;
2763         u64 num_csums;
2764
2765         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2766         num_csums_per_leaf = div64_u64(csum_size,
2767                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2768         num_csums = div64_u64(csum_bytes, root->sectorsize);
2769         num_csums += num_csums_per_leaf - 1;
2770         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2771         return num_csums;
2772 }
2773
2774 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2775                                        struct btrfs_root *root)
2776 {
2777         struct btrfs_block_rsv *global_rsv;
2778         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2779         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2780         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2781         u64 num_bytes, num_dirty_bgs_bytes;
2782         int ret = 0;
2783
2784         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2785         num_heads = heads_to_leaves(root, num_heads);
2786         if (num_heads > 1)
2787                 num_bytes += (num_heads - 1) * root->nodesize;
2788         num_bytes <<= 1;
2789         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2790         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2791                                                              num_dirty_bgs);
2792         global_rsv = &root->fs_info->global_block_rsv;
2793
2794         /*
2795          * If we can't allocate any more chunks lets make sure we have _lots_ of
2796          * wiggle room since running delayed refs can create more delayed refs.
2797          */
2798         if (global_rsv->space_info->full) {
2799                 num_dirty_bgs_bytes <<= 1;
2800                 num_bytes <<= 1;
2801         }
2802
2803         spin_lock(&global_rsv->lock);
2804         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2805                 ret = 1;
2806         spin_unlock(&global_rsv->lock);
2807         return ret;
2808 }
2809
2810 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2811                                        struct btrfs_root *root)
2812 {
2813         struct btrfs_fs_info *fs_info = root->fs_info;
2814         u64 num_entries =
2815                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2816         u64 avg_runtime;
2817         u64 val;
2818
2819         smp_mb();
2820         avg_runtime = fs_info->avg_delayed_ref_runtime;
2821         val = num_entries * avg_runtime;
2822         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2823                 return 1;
2824         if (val >= NSEC_PER_SEC / 2)
2825                 return 2;
2826
2827         return btrfs_check_space_for_delayed_refs(trans, root);
2828 }
2829
2830 struct async_delayed_refs {
2831         struct btrfs_root *root;
2832         int count;
2833         int error;
2834         int sync;
2835         struct completion wait;
2836         struct btrfs_work work;
2837 };
2838
2839 static void delayed_ref_async_start(struct btrfs_work *work)
2840 {
2841         struct async_delayed_refs *async;
2842         struct btrfs_trans_handle *trans;
2843         int ret;
2844
2845         async = container_of(work, struct async_delayed_refs, work);
2846
2847         trans = btrfs_join_transaction(async->root);
2848         if (IS_ERR(trans)) {
2849                 async->error = PTR_ERR(trans);
2850                 goto done;
2851         }
2852
2853         /*
2854          * trans->sync means that when we call end_transaction, we won't
2855          * wait on delayed refs
2856          */
2857         trans->sync = true;
2858         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2859         if (ret)
2860                 async->error = ret;
2861
2862         ret = btrfs_end_transaction(trans, async->root);
2863         if (ret && !async->error)
2864                 async->error = ret;
2865 done:
2866         if (async->sync)
2867                 complete(&async->wait);
2868         else
2869                 kfree(async);
2870 }
2871
2872 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2873                                  unsigned long count, int wait)
2874 {
2875         struct async_delayed_refs *async;
2876         int ret;
2877
2878         async = kmalloc(sizeof(*async), GFP_NOFS);
2879         if (!async)
2880                 return -ENOMEM;
2881
2882         async->root = root->fs_info->tree_root;
2883         async->count = count;
2884         async->error = 0;
2885         if (wait)
2886                 async->sync = 1;
2887         else
2888                 async->sync = 0;
2889         init_completion(&async->wait);
2890
2891         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2892                         delayed_ref_async_start, NULL, NULL);
2893
2894         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2895
2896         if (wait) {
2897                 wait_for_completion(&async->wait);
2898                 ret = async->error;
2899                 kfree(async);
2900                 return ret;
2901         }
2902         return 0;
2903 }
2904
2905 /*
2906  * this starts processing the delayed reference count updates and
2907  * extent insertions we have queued up so far.  count can be
2908  * 0, which means to process everything in the tree at the start
2909  * of the run (but not newly added entries), or it can be some target
2910  * number you'd like to process.
2911  *
2912  * Returns 0 on success or if called with an aborted transaction
2913  * Returns <0 on error and aborts the transaction
2914  */
2915 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2916                            struct btrfs_root *root, unsigned long count)
2917 {
2918         struct rb_node *node;
2919         struct btrfs_delayed_ref_root *delayed_refs;
2920         struct btrfs_delayed_ref_head *head;
2921         int ret;
2922         int run_all = count == (unsigned long)-1;
2923         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2924
2925         /* We'll clean this up in btrfs_cleanup_transaction */
2926         if (trans->aborted)
2927                 return 0;
2928
2929         if (root->fs_info->creating_free_space_tree)
2930                 return 0;
2931
2932         if (root == root->fs_info->extent_root)
2933                 root = root->fs_info->tree_root;
2934
2935         delayed_refs = &trans->transaction->delayed_refs;
2936         if (count == 0)
2937                 count = atomic_read(&delayed_refs->num_entries) * 2;
2938
2939 again:
2940 #ifdef SCRAMBLE_DELAYED_REFS
2941         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2942 #endif
2943         trans->can_flush_pending_bgs = false;
2944         ret = __btrfs_run_delayed_refs(trans, root, count);
2945         if (ret < 0) {
2946                 btrfs_abort_transaction(trans, root, ret);
2947                 return ret;
2948         }
2949
2950         if (run_all) {
2951                 if (!list_empty(&trans->new_bgs))
2952                         btrfs_create_pending_block_groups(trans, root);
2953
2954                 spin_lock(&delayed_refs->lock);
2955                 node = rb_first(&delayed_refs->href_root);
2956                 if (!node) {
2957                         spin_unlock(&delayed_refs->lock);
2958                         goto out;
2959                 }
2960                 count = (unsigned long)-1;
2961
2962                 while (node) {
2963                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2964                                         href_node);
2965                         if (btrfs_delayed_ref_is_head(&head->node)) {
2966                                 struct btrfs_delayed_ref_node *ref;
2967
2968                                 ref = &head->node;
2969                                 atomic_inc(&ref->refs);
2970
2971                                 spin_unlock(&delayed_refs->lock);
2972                                 /*
2973                                  * Mutex was contended, block until it's
2974                                  * released and try again
2975                                  */
2976                                 mutex_lock(&head->mutex);
2977                                 mutex_unlock(&head->mutex);
2978
2979                                 btrfs_put_delayed_ref(ref);
2980                                 cond_resched();
2981                                 goto again;
2982                         } else {
2983                                 WARN_ON(1);
2984                         }
2985                         node = rb_next(node);
2986                 }
2987                 spin_unlock(&delayed_refs->lock);
2988                 cond_resched();
2989                 goto again;
2990         }
2991 out:
2992         assert_qgroups_uptodate(trans);
2993         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2994         return 0;
2995 }
2996
2997 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2998                                 struct btrfs_root *root,
2999                                 u64 bytenr, u64 num_bytes, u64 flags,
3000                                 int level, int is_data)
3001 {
3002         struct btrfs_delayed_extent_op *extent_op;
3003         int ret;
3004
3005         extent_op = btrfs_alloc_delayed_extent_op();
3006         if (!extent_op)
3007                 return -ENOMEM;
3008
3009         extent_op->flags_to_set = flags;
3010         extent_op->update_flags = true;
3011         extent_op->update_key = false;
3012         extent_op->is_data = is_data ? true : false;
3013         extent_op->level = level;
3014
3015         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3016                                           num_bytes, extent_op);
3017         if (ret)
3018                 btrfs_free_delayed_extent_op(extent_op);
3019         return ret;
3020 }
3021
3022 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3023                                       struct btrfs_root *root,
3024                                       struct btrfs_path *path,
3025                                       u64 objectid, u64 offset, u64 bytenr)
3026 {
3027         struct btrfs_delayed_ref_head *head;
3028         struct btrfs_delayed_ref_node *ref;
3029         struct btrfs_delayed_data_ref *data_ref;
3030         struct btrfs_delayed_ref_root *delayed_refs;
3031         int ret = 0;
3032
3033         delayed_refs = &trans->transaction->delayed_refs;
3034         spin_lock(&delayed_refs->lock);
3035         head = btrfs_find_delayed_ref_head(trans, bytenr);
3036         if (!head) {
3037                 spin_unlock(&delayed_refs->lock);
3038                 return 0;
3039         }
3040
3041         if (!mutex_trylock(&head->mutex)) {
3042                 atomic_inc(&head->node.refs);
3043                 spin_unlock(&delayed_refs->lock);
3044
3045                 btrfs_release_path(path);
3046
3047                 /*
3048                  * Mutex was contended, block until it's released and let
3049                  * caller try again
3050                  */
3051                 mutex_lock(&head->mutex);
3052                 mutex_unlock(&head->mutex);
3053                 btrfs_put_delayed_ref(&head->node);
3054                 return -EAGAIN;
3055         }
3056         spin_unlock(&delayed_refs->lock);
3057
3058         spin_lock(&head->lock);
3059         list_for_each_entry(ref, &head->ref_list, list) {
3060                 /* If it's a shared ref we know a cross reference exists */
3061                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3062                         ret = 1;
3063                         break;
3064                 }
3065
3066                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3067
3068                 /*
3069                  * If our ref doesn't match the one we're currently looking at
3070                  * then we have a cross reference.
3071                  */
3072                 if (data_ref->root != root->root_key.objectid ||
3073                     data_ref->objectid != objectid ||
3074                     data_ref->offset != offset) {
3075                         ret = 1;
3076                         break;
3077                 }
3078         }
3079         spin_unlock(&head->lock);
3080         mutex_unlock(&head->mutex);
3081         return ret;
3082 }
3083
3084 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3085                                         struct btrfs_root *root,
3086                                         struct btrfs_path *path,
3087                                         u64 objectid, u64 offset, u64 bytenr)
3088 {
3089         struct btrfs_root *extent_root = root->fs_info->extent_root;
3090         struct extent_buffer *leaf;
3091         struct btrfs_extent_data_ref *ref;
3092         struct btrfs_extent_inline_ref *iref;
3093         struct btrfs_extent_item *ei;
3094         struct btrfs_key key;
3095         u32 item_size;
3096         int ret;
3097
3098         key.objectid = bytenr;
3099         key.offset = (u64)-1;
3100         key.type = BTRFS_EXTENT_ITEM_KEY;
3101
3102         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3103         if (ret < 0)
3104                 goto out;
3105         BUG_ON(ret == 0); /* Corruption */
3106
3107         ret = -ENOENT;
3108         if (path->slots[0] == 0)
3109                 goto out;
3110
3111         path->slots[0]--;
3112         leaf = path->nodes[0];
3113         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3114
3115         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3116                 goto out;
3117
3118         ret = 1;
3119         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3120 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3121         if (item_size < sizeof(*ei)) {
3122                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3123                 goto out;
3124         }
3125 #endif
3126         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3127
3128         if (item_size != sizeof(*ei) +
3129             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3130                 goto out;
3131
3132         if (btrfs_extent_generation(leaf, ei) <=
3133             btrfs_root_last_snapshot(&root->root_item))
3134                 goto out;
3135
3136         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3137         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3138             BTRFS_EXTENT_DATA_REF_KEY)
3139                 goto out;
3140
3141         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3142         if (btrfs_extent_refs(leaf, ei) !=
3143             btrfs_extent_data_ref_count(leaf, ref) ||
3144             btrfs_extent_data_ref_root(leaf, ref) !=
3145             root->root_key.objectid ||
3146             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3147             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3148                 goto out;
3149
3150         ret = 0;
3151 out:
3152         return ret;
3153 }
3154
3155 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3156                           struct btrfs_root *root,
3157                           u64 objectid, u64 offset, u64 bytenr)
3158 {
3159         struct btrfs_path *path;
3160         int ret;
3161         int ret2;
3162
3163         path = btrfs_alloc_path();
3164         if (!path)
3165                 return -ENOENT;
3166
3167         do {
3168                 ret = check_committed_ref(trans, root, path, objectid,
3169                                           offset, bytenr);
3170                 if (ret && ret != -ENOENT)
3171                         goto out;
3172
3173                 ret2 = check_delayed_ref(trans, root, path, objectid,
3174                                          offset, bytenr);
3175         } while (ret2 == -EAGAIN);
3176
3177         if (ret2 && ret2 != -ENOENT) {
3178                 ret = ret2;
3179                 goto out;
3180         }
3181
3182         if (ret != -ENOENT || ret2 != -ENOENT)
3183                 ret = 0;
3184 out:
3185         btrfs_free_path(path);
3186         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3187                 WARN_ON(ret > 0);
3188         return ret;
3189 }
3190
3191 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3192                            struct btrfs_root *root,
3193                            struct extent_buffer *buf,
3194                            int full_backref, int inc)
3195 {
3196         u64 bytenr;
3197         u64 num_bytes;
3198         u64 parent;
3199         u64 ref_root;
3200         u32 nritems;
3201         struct btrfs_key key;
3202         struct btrfs_file_extent_item *fi;
3203         int i;
3204         int level;
3205         int ret = 0;
3206         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3207                             u64, u64, u64, u64, u64, u64);
3208
3209
3210         if (btrfs_test_is_dummy_root(root))
3211                 return 0;
3212
3213         ref_root = btrfs_header_owner(buf);
3214         nritems = btrfs_header_nritems(buf);
3215         level = btrfs_header_level(buf);
3216
3217         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3218                 return 0;
3219
3220         if (inc)
3221                 process_func = btrfs_inc_extent_ref;
3222         else
3223                 process_func = btrfs_free_extent;
3224
3225         if (full_backref)
3226                 parent = buf->start;
3227         else
3228                 parent = 0;
3229
3230         for (i = 0; i < nritems; i++) {
3231                 if (level == 0) {
3232                         btrfs_item_key_to_cpu(buf, &key, i);
3233                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3234                                 continue;
3235                         fi = btrfs_item_ptr(buf, i,
3236                                             struct btrfs_file_extent_item);
3237                         if (btrfs_file_extent_type(buf, fi) ==
3238                             BTRFS_FILE_EXTENT_INLINE)
3239                                 continue;
3240                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3241                         if (bytenr == 0)
3242                                 continue;
3243
3244                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3245                         key.offset -= btrfs_file_extent_offset(buf, fi);
3246                         ret = process_func(trans, root, bytenr, num_bytes,
3247                                            parent, ref_root, key.objectid,
3248                                            key.offset);
3249                         if (ret)
3250                                 goto fail;
3251                 } else {
3252                         bytenr = btrfs_node_blockptr(buf, i);
3253                         num_bytes = root->nodesize;
3254                         ret = process_func(trans, root, bytenr, num_bytes,
3255                                            parent, ref_root, level - 1, 0);
3256                         if (ret)
3257                                 goto fail;
3258                 }
3259         }
3260         return 0;
3261 fail:
3262         return ret;
3263 }
3264
3265 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3266                   struct extent_buffer *buf, int full_backref)
3267 {
3268         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3269 }
3270
3271 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3272                   struct extent_buffer *buf, int full_backref)
3273 {
3274         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3275 }
3276
3277 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3278                                  struct btrfs_root *root,
3279                                  struct btrfs_path *path,
3280                                  struct btrfs_block_group_cache *cache)
3281 {
3282         int ret;
3283         struct btrfs_root *extent_root = root->fs_info->extent_root;
3284         unsigned long bi;
3285         struct extent_buffer *leaf;
3286
3287         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3288         if (ret) {
3289                 if (ret > 0)
3290                         ret = -ENOENT;
3291                 goto fail;
3292         }
3293
3294         leaf = path->nodes[0];
3295         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3296         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3297         btrfs_mark_buffer_dirty(leaf);
3298 fail:
3299         btrfs_release_path(path);
3300         return ret;
3301
3302 }
3303
3304 static struct btrfs_block_group_cache *
3305 next_block_group(struct btrfs_root *root,
3306                  struct btrfs_block_group_cache *cache)
3307 {
3308         struct rb_node *node;
3309
3310         spin_lock(&root->fs_info->block_group_cache_lock);
3311
3312         /* If our block group was removed, we need a full search. */
3313         if (RB_EMPTY_NODE(&cache->cache_node)) {
3314                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3315
3316                 spin_unlock(&root->fs_info->block_group_cache_lock);
3317                 btrfs_put_block_group(cache);
3318                 cache = btrfs_lookup_first_block_group(root->fs_info,
3319                                                        next_bytenr);
3320                 return cache;
3321         }
3322         node = rb_next(&cache->cache_node);
3323         btrfs_put_block_group(cache);
3324         if (node) {
3325                 cache = rb_entry(node, struct btrfs_block_group_cache,
3326                                  cache_node);
3327                 btrfs_get_block_group(cache);
3328         } else
3329                 cache = NULL;
3330         spin_unlock(&root->fs_info->block_group_cache_lock);
3331         return cache;
3332 }
3333
3334 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3335                             struct btrfs_trans_handle *trans,
3336                             struct btrfs_path *path)
3337 {
3338         struct btrfs_root *root = block_group->fs_info->tree_root;
3339         struct inode *inode = NULL;
3340         u64 alloc_hint = 0;
3341         int dcs = BTRFS_DC_ERROR;
3342         u64 num_pages = 0;
3343         int retries = 0;
3344         int ret = 0;
3345
3346         /*
3347          * If this block group is smaller than 100 megs don't bother caching the
3348          * block group.
3349          */
3350         if (block_group->key.offset < (100 * SZ_1M)) {
3351                 spin_lock(&block_group->lock);
3352                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3353                 spin_unlock(&block_group->lock);
3354                 return 0;
3355         }
3356
3357         if (trans->aborted)
3358                 return 0;
3359 again:
3360         inode = lookup_free_space_inode(root, block_group, path);
3361         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3362                 ret = PTR_ERR(inode);
3363                 btrfs_release_path(path);
3364                 goto out;
3365         }
3366
3367         if (IS_ERR(inode)) {
3368                 BUG_ON(retries);
3369                 retries++;
3370
3371                 if (block_group->ro)
3372                         goto out_free;
3373
3374                 ret = create_free_space_inode(root, trans, block_group, path);
3375                 if (ret)
3376                         goto out_free;
3377                 goto again;
3378         }
3379
3380         /* We've already setup this transaction, go ahead and exit */
3381         if (block_group->cache_generation == trans->transid &&
3382             i_size_read(inode)) {
3383                 dcs = BTRFS_DC_SETUP;
3384                 goto out_put;
3385         }
3386
3387         /*
3388          * We want to set the generation to 0, that way if anything goes wrong
3389          * from here on out we know not to trust this cache when we load up next
3390          * time.
3391          */
3392         BTRFS_I(inode)->generation = 0;
3393         ret = btrfs_update_inode(trans, root, inode);
3394         if (ret) {
3395                 /*
3396                  * So theoretically we could recover from this, simply set the
3397                  * super cache generation to 0 so we know to invalidate the
3398                  * cache, but then we'd have to keep track of the block groups
3399                  * that fail this way so we know we _have_ to reset this cache
3400                  * before the next commit or risk reading stale cache.  So to
3401                  * limit our exposure to horrible edge cases lets just abort the
3402                  * transaction, this only happens in really bad situations
3403                  * anyway.
3404                  */
3405                 btrfs_abort_transaction(trans, root, ret);
3406                 goto out_put;
3407         }
3408         WARN_ON(ret);
3409
3410         if (i_size_read(inode) > 0) {
3411                 ret = btrfs_check_trunc_cache_free_space(root,
3412                                         &root->fs_info->global_block_rsv);
3413                 if (ret)
3414                         goto out_put;
3415
3416                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3417                 if (ret)
3418                         goto out_put;
3419         }
3420
3421         spin_lock(&block_group->lock);
3422         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3423             !btrfs_test_opt(root, SPACE_CACHE)) {
3424                 /*
3425                  * don't bother trying to write stuff out _if_
3426                  * a) we're not cached,
3427                  * b) we're with nospace_cache mount option.
3428                  */
3429                 dcs = BTRFS_DC_WRITTEN;
3430                 spin_unlock(&block_group->lock);
3431                 goto out_put;
3432         }
3433         spin_unlock(&block_group->lock);
3434
3435         /*
3436          * We hit an ENOSPC when setting up the cache in this transaction, just
3437          * skip doing the setup, we've already cleared the cache so we're safe.
3438          */
3439         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3440                 ret = -ENOSPC;
3441                 goto out_put;
3442         }
3443
3444         /*
3445          * Try to preallocate enough space based on how big the block group is.
3446          * Keep in mind this has to include any pinned space which could end up
3447          * taking up quite a bit since it's not folded into the other space
3448          * cache.
3449          */
3450         num_pages = div_u64(block_group->key.offset, SZ_256M);
3451         if (!num_pages)
3452                 num_pages = 1;
3453
3454         num_pages *= 16;
3455         num_pages *= PAGE_SIZE;
3456
3457         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3458         if (ret)
3459                 goto out_put;
3460
3461         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3462                                               num_pages, num_pages,
3463                                               &alloc_hint);
3464         /*
3465          * Our cache requires contiguous chunks so that we don't modify a bunch
3466          * of metadata or split extents when writing the cache out, which means
3467          * we can enospc if we are heavily fragmented in addition to just normal
3468          * out of space conditions.  So if we hit this just skip setting up any
3469          * other block groups for this transaction, maybe we'll unpin enough
3470          * space the next time around.
3471          */
3472         if (!ret)
3473                 dcs = BTRFS_DC_SETUP;
3474         else if (ret == -ENOSPC)
3475                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3476         btrfs_free_reserved_data_space(inode, 0, num_pages);
3477
3478 out_put:
3479         iput(inode);
3480 out_free:
3481         btrfs_release_path(path);
3482 out:
3483         spin_lock(&block_group->lock);
3484         if (!ret && dcs == BTRFS_DC_SETUP)
3485                 block_group->cache_generation = trans->transid;
3486         block_group->disk_cache_state = dcs;
3487         spin_unlock(&block_group->lock);
3488
3489         return ret;
3490 }
3491
3492 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3493                             struct btrfs_root *root)
3494 {
3495         struct btrfs_block_group_cache *cache, *tmp;
3496         struct btrfs_transaction *cur_trans = trans->transaction;
3497         struct btrfs_path *path;
3498
3499         if (list_empty(&cur_trans->dirty_bgs) ||
3500             !btrfs_test_opt(root, SPACE_CACHE))
3501                 return 0;
3502
3503         path = btrfs_alloc_path();
3504         if (!path)
3505                 return -ENOMEM;
3506
3507         /* Could add new block groups, use _safe just in case */
3508         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3509                                  dirty_list) {
3510                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3511                         cache_save_setup(cache, trans, path);
3512         }
3513
3514         btrfs_free_path(path);
3515         return 0;
3516 }
3517
3518 /*
3519  * transaction commit does final block group cache writeback during a
3520  * critical section where nothing is allowed to change the FS.  This is
3521  * required in order for the cache to actually match the block group,
3522  * but can introduce a lot of latency into the commit.
3523  *
3524  * So, btrfs_start_dirty_block_groups is here to kick off block group
3525  * cache IO.  There's a chance we'll have to redo some of it if the
3526  * block group changes again during the commit, but it greatly reduces
3527  * the commit latency by getting rid of the easy block groups while
3528  * we're still allowing others to join the commit.
3529  */
3530 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3531                                    struct btrfs_root *root)
3532 {
3533         struct btrfs_block_group_cache *cache;
3534         struct btrfs_transaction *cur_trans = trans->transaction;
3535         int ret = 0;
3536         int should_put;
3537         struct btrfs_path *path = NULL;
3538         LIST_HEAD(dirty);
3539         struct list_head *io = &cur_trans->io_bgs;
3540         int num_started = 0;
3541         int loops = 0;
3542
3543         spin_lock(&cur_trans->dirty_bgs_lock);
3544         if (list_empty(&cur_trans->dirty_bgs)) {
3545                 spin_unlock(&cur_trans->dirty_bgs_lock);
3546                 return 0;
3547         }
3548         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3549         spin_unlock(&cur_trans->dirty_bgs_lock);
3550
3551 again:
3552         /*
3553          * make sure all the block groups on our dirty list actually
3554          * exist
3555          */
3556         btrfs_create_pending_block_groups(trans, root);
3557
3558         if (!path) {
3559                 path = btrfs_alloc_path();
3560                 if (!path)
3561                         return -ENOMEM;
3562         }
3563
3564         /*
3565          * cache_write_mutex is here only to save us from balance or automatic
3566          * removal of empty block groups deleting this block group while we are
3567          * writing out the cache
3568          */
3569         mutex_lock(&trans->transaction->cache_write_mutex);
3570         while (!list_empty(&dirty)) {
3571                 cache = list_first_entry(&dirty,
3572                                          struct btrfs_block_group_cache,
3573                                          dirty_list);
3574                 /*
3575                  * this can happen if something re-dirties a block
3576                  * group that is already under IO.  Just wait for it to
3577                  * finish and then do it all again
3578                  */
3579                 if (!list_empty(&cache->io_list)) {
3580                         list_del_init(&cache->io_list);
3581                         btrfs_wait_cache_io(root, trans, cache,
3582                                             &cache->io_ctl, path,
3583                                             cache->key.objectid);
3584                         btrfs_put_block_group(cache);
3585                 }
3586
3587
3588                 /*
3589                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3590                  * if it should update the cache_state.  Don't delete
3591                  * until after we wait.
3592                  *
3593                  * Since we're not running in the commit critical section
3594                  * we need the dirty_bgs_lock to protect from update_block_group
3595                  */
3596                 spin_lock(&cur_trans->dirty_bgs_lock);
3597                 list_del_init(&cache->dirty_list);
3598                 spin_unlock(&cur_trans->dirty_bgs_lock);
3599
3600                 should_put = 1;
3601
3602                 cache_save_setup(cache, trans, path);
3603
3604                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3605                         cache->io_ctl.inode = NULL;
3606                         ret = btrfs_write_out_cache(root, trans, cache, path);
3607                         if (ret == 0 && cache->io_ctl.inode) {
3608                                 num_started++;
3609                                 should_put = 0;
3610
3611                                 /*
3612                                  * the cache_write_mutex is protecting
3613                                  * the io_list
3614                                  */
3615                                 list_add_tail(&cache->io_list, io);
3616                         } else {
3617                                 /*
3618                                  * if we failed to write the cache, the
3619                                  * generation will be bad and life goes on
3620                                  */
3621                                 ret = 0;
3622                         }
3623                 }
3624                 if (!ret) {
3625                         ret = write_one_cache_group(trans, root, path, cache);
3626                         /*
3627                          * Our block group might still be attached to the list
3628                          * of new block groups in the transaction handle of some
3629                          * other task (struct btrfs_trans_handle->new_bgs). This
3630                          * means its block group item isn't yet in the extent
3631                          * tree. If this happens ignore the error, as we will
3632                          * try again later in the critical section of the
3633                          * transaction commit.
3634                          */
3635                         if (ret == -ENOENT) {
3636                                 ret = 0;
3637                                 spin_lock(&cur_trans->dirty_bgs_lock);
3638                                 if (list_empty(&cache->dirty_list)) {
3639                                         list_add_tail(&cache->dirty_list,
3640                                                       &cur_trans->dirty_bgs);
3641                                         btrfs_get_block_group(cache);
3642                                 }
3643                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3644                         } else if (ret) {
3645                                 btrfs_abort_transaction(trans, root, ret);
3646                         }
3647                 }
3648
3649                 /* if its not on the io list, we need to put the block group */
3650                 if (should_put)
3651                         btrfs_put_block_group(cache);
3652
3653                 if (ret)
3654                         break;
3655
3656                 /*
3657                  * Avoid blocking other tasks for too long. It might even save
3658                  * us from writing caches for block groups that are going to be
3659                  * removed.
3660                  */
3661                 mutex_unlock(&trans->transaction->cache_write_mutex);
3662                 mutex_lock(&trans->transaction->cache_write_mutex);
3663         }
3664         mutex_unlock(&trans->transaction->cache_write_mutex);
3665
3666         /*
3667          * go through delayed refs for all the stuff we've just kicked off
3668          * and then loop back (just once)
3669          */
3670         ret = btrfs_run_delayed_refs(trans, root, 0);
3671         if (!ret && loops == 0) {
3672                 loops++;
3673                 spin_lock(&cur_trans->dirty_bgs_lock);
3674                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3675                 /*
3676                  * dirty_bgs_lock protects us from concurrent block group
3677                  * deletes too (not just cache_write_mutex).
3678                  */
3679                 if (!list_empty(&dirty)) {
3680                         spin_unlock(&cur_trans->dirty_bgs_lock);
3681                         goto again;
3682                 }
3683                 spin_unlock(&cur_trans->dirty_bgs_lock);
3684         }
3685
3686         btrfs_free_path(path);
3687         return ret;
3688 }
3689
3690 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3691                                    struct btrfs_root *root)
3692 {
3693         struct btrfs_block_group_cache *cache;
3694         struct btrfs_transaction *cur_trans = trans->transaction;
3695         int ret = 0;
3696         int should_put;
3697         struct btrfs_path *path;
3698         struct list_head *io = &cur_trans->io_bgs;
3699         int num_started = 0;
3700
3701         path = btrfs_alloc_path();
3702         if (!path)
3703                 return -ENOMEM;
3704
3705         /*
3706          * Even though we are in the critical section of the transaction commit,
3707          * we can still have concurrent tasks adding elements to this
3708          * transaction's list of dirty block groups. These tasks correspond to
3709          * endio free space workers started when writeback finishes for a
3710          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3711          * allocate new block groups as a result of COWing nodes of the root
3712          * tree when updating the free space inode. The writeback for the space
3713          * caches is triggered by an earlier call to
3714          * btrfs_start_dirty_block_groups() and iterations of the following
3715          * loop.
3716          * Also we want to do the cache_save_setup first and then run the
3717          * delayed refs to make sure we have the best chance at doing this all
3718          * in one shot.
3719          */
3720         spin_lock(&cur_trans->dirty_bgs_lock);
3721         while (!list_empty(&cur_trans->dirty_bgs)) {
3722                 cache = list_first_entry(&cur_trans->dirty_bgs,
3723                                          struct btrfs_block_group_cache,
3724                                          dirty_list);
3725
3726                 /*
3727                  * this can happen if cache_save_setup re-dirties a block
3728                  * group that is already under IO.  Just wait for it to
3729                  * finish and then do it all again
3730                  */
3731                 if (!list_empty(&cache->io_list)) {
3732                         spin_unlock(&cur_trans->dirty_bgs_lock);
3733                         list_del_init(&cache->io_list);
3734                         btrfs_wait_cache_io(root, trans, cache,
3735                                             &cache->io_ctl, path,
3736                                             cache->key.objectid);
3737                         btrfs_put_block_group(cache);
3738                         spin_lock(&cur_trans->dirty_bgs_lock);
3739                 }
3740
3741                 /*
3742                  * don't remove from the dirty list until after we've waited
3743                  * on any pending IO
3744                  */
3745                 list_del_init(&cache->dirty_list);
3746                 spin_unlock(&cur_trans->dirty_bgs_lock);
3747                 should_put = 1;
3748
3749                 cache_save_setup(cache, trans, path);
3750
3751                 if (!ret)
3752                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3753
3754                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3755                         cache->io_ctl.inode = NULL;
3756                         ret = btrfs_write_out_cache(root, trans, cache, path);
3757                         if (ret == 0 && cache->io_ctl.inode) {
3758                                 num_started++;
3759                                 should_put = 0;
3760                                 list_add_tail(&cache->io_list, io);
3761                         } else {
3762                                 /*
3763                                  * if we failed to write the cache, the
3764                                  * generation will be bad and life goes on
3765                                  */
3766                                 ret = 0;
3767                         }
3768                 }
3769                 if (!ret) {
3770                         ret = write_one_cache_group(trans, root, path, cache);
3771                         /*
3772                          * One of the free space endio workers might have
3773                          * created a new block group while updating a free space
3774                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3775                          * and hasn't released its transaction handle yet, in
3776                          * which case the new block group is still attached to
3777                          * its transaction handle and its creation has not
3778                          * finished yet (no block group item in the extent tree
3779                          * yet, etc). If this is the case, wait for all free
3780                          * space endio workers to finish and retry. This is a
3781                          * a very rare case so no need for a more efficient and
3782                          * complex approach.
3783                          */
3784                         if (ret == -ENOENT) {
3785                                 wait_event(cur_trans->writer_wait,
3786                                    atomic_read(&cur_trans->num_writers) == 1);
3787                                 ret = write_one_cache_group(trans, root, path,
3788                                                             cache);
3789                         }
3790                         if (ret)
3791                                 btrfs_abort_transaction(trans, root, ret);
3792                 }
3793
3794                 /* if its not on the io list, we need to put the block group */
3795                 if (should_put)
3796                         btrfs_put_block_group(cache);
3797                 spin_lock(&cur_trans->dirty_bgs_lock);
3798         }
3799         spin_unlock(&cur_trans->dirty_bgs_lock);
3800
3801         while (!list_empty(io)) {
3802                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3803                                          io_list);
3804                 list_del_init(&cache->io_list);
3805                 btrfs_wait_cache_io(root, trans, cache,
3806                                     &cache->io_ctl, path, cache->key.objectid);
3807                 btrfs_put_block_group(cache);
3808         }
3809
3810         btrfs_free_path(path);
3811         return ret;
3812 }
3813
3814 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3815 {
3816         struct btrfs_block_group_cache *block_group;
3817         int readonly = 0;
3818
3819         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3820         if (!block_group || block_group->ro)
3821                 readonly = 1;
3822         if (block_group)
3823                 btrfs_put_block_group(block_group);
3824         return readonly;
3825 }
3826
3827 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3828 {
3829         struct btrfs_block_group_cache *bg;
3830         bool ret = true;
3831
3832         bg = btrfs_lookup_block_group(fs_info, bytenr);
3833         if (!bg)
3834                 return false;
3835
3836         spin_lock(&bg->lock);
3837         if (bg->ro)
3838                 ret = false;
3839         else
3840                 atomic_inc(&bg->nocow_writers);
3841         spin_unlock(&bg->lock);
3842
3843         /* no put on block group, done by btrfs_dec_nocow_writers */
3844         if (!ret)
3845                 btrfs_put_block_group(bg);
3846
3847         return ret;
3848
3849 }
3850
3851 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3852 {
3853         struct btrfs_block_group_cache *bg;
3854
3855         bg = btrfs_lookup_block_group(fs_info, bytenr);
3856         ASSERT(bg);
3857         if (atomic_dec_and_test(&bg->nocow_writers))
3858                 wake_up_atomic_t(&bg->nocow_writers);
3859         /*
3860          * Once for our lookup and once for the lookup done by a previous call
3861          * to btrfs_inc_nocow_writers()
3862          */
3863         btrfs_put_block_group(bg);
3864         btrfs_put_block_group(bg);
3865 }
3866
3867 static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
3868 {
3869         schedule();
3870         return 0;
3871 }
3872
3873 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3874 {
3875         wait_on_atomic_t(&bg->nocow_writers,
3876                          btrfs_wait_nocow_writers_atomic_t,
3877                          TASK_UNINTERRUPTIBLE);
3878 }
3879
3880 static const char *alloc_name(u64 flags)
3881 {
3882         switch (flags) {
3883         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3884                 return "mixed";
3885         case BTRFS_BLOCK_GROUP_METADATA:
3886                 return "metadata";
3887         case BTRFS_BLOCK_GROUP_DATA:
3888                 return "data";
3889         case BTRFS_BLOCK_GROUP_SYSTEM:
3890                 return "system";
3891         default:
3892                 WARN_ON(1);
3893                 return "invalid-combination";
3894         };
3895 }
3896
3897 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3898                              u64 total_bytes, u64 bytes_used,
3899                              struct btrfs_space_info **space_info)
3900 {
3901         struct btrfs_space_info *found;
3902         int i;
3903         int factor;
3904         int ret;
3905
3906         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3907                      BTRFS_BLOCK_GROUP_RAID10))
3908                 factor = 2;
3909         else
3910                 factor = 1;
3911
3912         found = __find_space_info(info, flags);
3913         if (found) {
3914                 spin_lock(&found->lock);
3915                 found->total_bytes += total_bytes;
3916                 found->disk_total += total_bytes * factor;
3917                 found->bytes_used += bytes_used;
3918                 found->disk_used += bytes_used * factor;
3919                 if (total_bytes > 0)
3920                         found->full = 0;
3921                 spin_unlock(&found->lock);
3922                 *space_info = found;
3923                 return 0;
3924         }
3925         found = kzalloc(sizeof(*found), GFP_NOFS);
3926         if (!found)
3927                 return -ENOMEM;
3928
3929         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3930         if (ret) {
3931                 kfree(found);
3932                 return ret;
3933         }
3934
3935         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3936                 INIT_LIST_HEAD(&found->block_groups[i]);
3937         init_rwsem(&found->groups_sem);
3938         spin_lock_init(&found->lock);
3939         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3940         found->total_bytes = total_bytes;
3941         found->disk_total = total_bytes * factor;
3942         found->bytes_used = bytes_used;
3943         found->disk_used = bytes_used * factor;
3944         found->bytes_pinned = 0;
3945         found->bytes_reserved = 0;
3946         found->bytes_readonly = 0;
3947         found->bytes_may_use = 0;
3948         found->full = 0;
3949         found->max_extent_size = 0;
3950         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3951         found->chunk_alloc = 0;
3952         found->flush = 0;
3953         init_waitqueue_head(&found->wait);
3954         INIT_LIST_HEAD(&found->ro_bgs);
3955
3956         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3957                                     info->space_info_kobj, "%s",
3958                                     alloc_name(found->flags));
3959         if (ret) {
3960                 kfree(found);
3961                 return ret;
3962         }
3963
3964         *space_info = found;
3965         list_add_rcu(&found->list, &info->space_info);
3966         if (flags & BTRFS_BLOCK_GROUP_DATA)
3967                 info->data_sinfo = found;
3968
3969         return ret;
3970 }
3971
3972 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3973 {
3974         u64 extra_flags = chunk_to_extended(flags) &
3975                                 BTRFS_EXTENDED_PROFILE_MASK;
3976
3977         write_seqlock(&fs_info->profiles_lock);
3978         if (flags & BTRFS_BLOCK_GROUP_DATA)
3979                 fs_info->avail_data_alloc_bits |= extra_flags;
3980         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3981                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3982         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3983                 fs_info->avail_system_alloc_bits |= extra_flags;
3984         write_sequnlock(&fs_info->profiles_lock);
3985 }
3986
3987 /*
3988  * returns target flags in extended format or 0 if restripe for this
3989  * chunk_type is not in progress
3990  *
3991  * should be called with either volume_mutex or balance_lock held
3992  */
3993 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3994 {
3995         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3996         u64 target = 0;
3997
3998         if (!bctl)
3999                 return 0;
4000
4001         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4002             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4003                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4004         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4005                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4006                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4007         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4008                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4009                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4010         }
4011
4012         return target;
4013 }
4014
4015 /*
4016  * @flags: available profiles in extended format (see ctree.h)
4017  *
4018  * Returns reduced profile in chunk format.  If profile changing is in
4019  * progress (either running or paused) picks the target profile (if it's
4020  * already available), otherwise falls back to plain reducing.
4021  */
4022 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
4023 {
4024         u64 num_devices = root->fs_info->fs_devices->rw_devices;
4025         u64 target;
4026         u64 raid_type;
4027         u64 allowed = 0;
4028
4029         /*
4030          * see if restripe for this chunk_type is in progress, if so
4031          * try to reduce to the target profile
4032          */
4033         spin_lock(&root->fs_info->balance_lock);
4034         target = get_restripe_target(root->fs_info, flags);
4035         if (target) {
4036                 /* pick target profile only if it's already available */
4037                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4038                         spin_unlock(&root->fs_info->balance_lock);
4039                         return extended_to_chunk(target);
4040                 }
4041         }
4042         spin_unlock(&root->fs_info->balance_lock);
4043
4044         /* First, mask out the RAID levels which aren't possible */
4045         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4046                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4047                         allowed |= btrfs_raid_group[raid_type];
4048         }
4049         allowed &= flags;
4050
4051         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4052                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4053         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4054                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4055         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4056                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4057         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4058                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4059         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4060                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4061
4062         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4063
4064         return extended_to_chunk(flags | allowed);
4065 }
4066
4067 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
4068 {
4069         unsigned seq;
4070         u64 flags;
4071
4072         do {
4073                 flags = orig_flags;
4074                 seq = read_seqbegin(&root->fs_info->profiles_lock);
4075
4076                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4077                         flags |= root->fs_info->avail_data_alloc_bits;
4078                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4079                         flags |= root->fs_info->avail_system_alloc_bits;
4080                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4081                         flags |= root->fs_info->avail_metadata_alloc_bits;
4082         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
4083
4084         return btrfs_reduce_alloc_profile(root, flags);
4085 }
4086
4087 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4088 {
4089         u64 flags;
4090         u64 ret;
4091
4092         if (data)
4093                 flags = BTRFS_BLOCK_GROUP_DATA;
4094         else if (root == root->fs_info->chunk_root)
4095                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4096         else
4097                 flags = BTRFS_BLOCK_GROUP_METADATA;
4098
4099         ret = get_alloc_profile(root, flags);
4100         return ret;
4101 }
4102
4103 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4104 {
4105         struct btrfs_space_info *data_sinfo;
4106         struct btrfs_root *root = BTRFS_I(inode)->root;
4107         struct btrfs_fs_info *fs_info = root->fs_info;
4108         u64 used;
4109         int ret = 0;
4110         int need_commit = 2;
4111         int have_pinned_space;
4112
4113         /* make sure bytes are sectorsize aligned */
4114         bytes = ALIGN(bytes, root->sectorsize);
4115
4116         if (btrfs_is_free_space_inode(inode)) {
4117                 need_commit = 0;
4118                 ASSERT(current->journal_info);
4119         }
4120
4121         data_sinfo = fs_info->data_sinfo;
4122         if (!data_sinfo)
4123                 goto alloc;
4124
4125 again:
4126         /* make sure we have enough space to handle the data first */
4127         spin_lock(&data_sinfo->lock);
4128         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4129                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4130                 data_sinfo->bytes_may_use;
4131
4132         if (used + bytes > data_sinfo->total_bytes) {
4133                 struct btrfs_trans_handle *trans;
4134
4135                 /*
4136                  * if we don't have enough free bytes in this space then we need
4137                  * to alloc a new chunk.
4138                  */
4139                 if (!data_sinfo->full) {
4140                         u64 alloc_target;
4141
4142                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4143                         spin_unlock(&data_sinfo->lock);
4144 alloc:
4145                         alloc_target = btrfs_get_alloc_profile(root, 1);
4146                         /*
4147                          * It is ugly that we don't call nolock join
4148                          * transaction for the free space inode case here.
4149                          * But it is safe because we only do the data space
4150                          * reservation for the free space cache in the
4151                          * transaction context, the common join transaction
4152                          * just increase the counter of the current transaction
4153                          * handler, doesn't try to acquire the trans_lock of
4154                          * the fs.
4155                          */
4156                         trans = btrfs_join_transaction(root);
4157                         if (IS_ERR(trans))
4158                                 return PTR_ERR(trans);
4159
4160                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4161                                              alloc_target,
4162                                              CHUNK_ALLOC_NO_FORCE);
4163                         btrfs_end_transaction(trans, root);
4164                         if (ret < 0) {
4165                                 if (ret != -ENOSPC)
4166                                         return ret;
4167                                 else {
4168                                         have_pinned_space = 1;
4169                                         goto commit_trans;
4170                                 }
4171                         }
4172
4173                         if (!data_sinfo)
4174                                 data_sinfo = fs_info->data_sinfo;
4175
4176                         goto again;
4177                 }
4178
4179                 /*
4180                  * If we don't have enough pinned space to deal with this
4181                  * allocation, and no removed chunk in current transaction,
4182                  * don't bother committing the transaction.
4183                  */
4184                 have_pinned_space = percpu_counter_compare(
4185                         &data_sinfo->total_bytes_pinned,
4186                         used + bytes - data_sinfo->total_bytes);
4187                 spin_unlock(&data_sinfo->lock);
4188
4189                 /* commit the current transaction and try again */
4190 commit_trans:
4191                 if (need_commit &&
4192                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4193                         need_commit--;
4194
4195                         if (need_commit > 0) {
4196                                 btrfs_start_delalloc_roots(fs_info, 0, -1);
4197                                 btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
4198                         }
4199
4200                         trans = btrfs_join_transaction(root);
4201                         if (IS_ERR(trans))
4202                                 return PTR_ERR(trans);
4203                         if (have_pinned_space >= 0 ||
4204                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4205                                      &trans->transaction->flags) ||
4206                             need_commit > 0) {
4207                                 ret = btrfs_commit_transaction(trans, root);
4208                                 if (ret)
4209                                         return ret;
4210                                 /*
4211                                  * The cleaner kthread might still be doing iput
4212                                  * operations. Wait for it to finish so that
4213                                  * more space is released.
4214                                  */
4215                                 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4216                                 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4217                                 goto again;
4218                         } else {
4219                                 btrfs_end_transaction(trans, root);
4220                         }
4221                 }
4222
4223                 trace_btrfs_space_reservation(root->fs_info,
4224                                               "space_info:enospc",
4225                                               data_sinfo->flags, bytes, 1);
4226                 return -ENOSPC;
4227         }
4228         data_sinfo->bytes_may_use += bytes;
4229         trace_btrfs_space_reservation(root->fs_info, "space_info",
4230                                       data_sinfo->flags, bytes, 1);
4231         spin_unlock(&data_sinfo->lock);
4232
4233         return ret;
4234 }
4235
4236 /*
4237  * New check_data_free_space() with ability for precious data reservation
4238  * Will replace old btrfs_check_data_free_space(), but for patch split,
4239  * add a new function first and then replace it.
4240  */
4241 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4242 {
4243         struct btrfs_root *root = BTRFS_I(inode)->root;
4244         int ret;
4245
4246         /* align the range */
4247         len = round_up(start + len, root->sectorsize) -
4248               round_down(start, root->sectorsize);
4249         start = round_down(start, root->sectorsize);
4250
4251         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4252         if (ret < 0)
4253                 return ret;
4254
4255         /*
4256          * Use new btrfs_qgroup_reserve_data to reserve precious data space
4257          *
4258          * TODO: Find a good method to avoid reserve data space for NOCOW
4259          * range, but don't impact performance on quota disable case.
4260          */
4261         ret = btrfs_qgroup_reserve_data(inode, start, len);
4262         return ret;
4263 }
4264
4265 /*
4266  * Called if we need to clear a data reservation for this inode
4267  * Normally in a error case.
4268  *
4269  * This one will *NOT* use accurate qgroup reserved space API, just for case
4270  * which we can't sleep and is sure it won't affect qgroup reserved space.
4271  * Like clear_bit_hook().
4272  */
4273 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4274                                             u64 len)
4275 {
4276         struct btrfs_root *root = BTRFS_I(inode)->root;
4277         struct btrfs_space_info *data_sinfo;
4278
4279         /* Make sure the range is aligned to sectorsize */
4280         len = round_up(start + len, root->sectorsize) -
4281               round_down(start, root->sectorsize);
4282         start = round_down(start, root->sectorsize);
4283
4284         data_sinfo = root->fs_info->data_sinfo;
4285         spin_lock(&data_sinfo->lock);
4286         if (WARN_ON(data_sinfo->bytes_may_use < len))
4287                 data_sinfo->bytes_may_use = 0;
4288         else
4289                 data_sinfo->bytes_may_use -= len;
4290         trace_btrfs_space_reservation(root->fs_info, "space_info",
4291                                       data_sinfo->flags, len, 0);
4292         spin_unlock(&data_sinfo->lock);
4293 }
4294
4295 /*
4296  * Called if we need to clear a data reservation for this inode
4297  * Normally in a error case.
4298  *
4299  * This one will handle the per-inode data rsv map for accurate reserved
4300  * space framework.
4301  */
4302 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4303 {
4304         btrfs_free_reserved_data_space_noquota(inode, start, len);
4305         btrfs_qgroup_free_data(inode, start, len);
4306 }
4307
4308 static void force_metadata_allocation(struct btrfs_fs_info *info)
4309 {
4310         struct list_head *head = &info->space_info;
4311         struct btrfs_space_info *found;
4312
4313         rcu_read_lock();
4314         list_for_each_entry_rcu(found, head, list) {
4315                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4316                         found->force_alloc = CHUNK_ALLOC_FORCE;
4317         }
4318         rcu_read_unlock();
4319 }
4320
4321 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4322 {
4323         return (global->size << 1);
4324 }
4325
4326 static int should_alloc_chunk(struct btrfs_root *root,
4327                               struct btrfs_space_info *sinfo, int force)
4328 {
4329         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4330         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4331         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4332         u64 thresh;
4333
4334         if (force == CHUNK_ALLOC_FORCE)
4335                 return 1;
4336
4337         /*
4338          * We need to take into account the global rsv because for all intents
4339          * and purposes it's used space.  Don't worry about locking the
4340          * global_rsv, it doesn't change except when the transaction commits.
4341          */
4342         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4343                 num_allocated += calc_global_rsv_need_space(global_rsv);
4344
4345         /*
4346          * in limited mode, we want to have some free space up to
4347          * about 1% of the FS size.
4348          */
4349         if (force == CHUNK_ALLOC_LIMITED) {
4350                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4351                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4352
4353                 if (num_bytes - num_allocated < thresh)
4354                         return 1;
4355         }
4356
4357         if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4358                 return 0;
4359         return 1;
4360 }
4361
4362 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4363 {
4364         u64 num_dev;
4365
4366         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4367                     BTRFS_BLOCK_GROUP_RAID0 |
4368                     BTRFS_BLOCK_GROUP_RAID5 |
4369                     BTRFS_BLOCK_GROUP_RAID6))
4370                 num_dev = root->fs_info->fs_devices->rw_devices;
4371         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4372                 num_dev = 2;
4373         else
4374                 num_dev = 1;    /* DUP or single */
4375
4376         return num_dev;
4377 }
4378
4379 /*
4380  * If @is_allocation is true, reserve space in the system space info necessary
4381  * for allocating a chunk, otherwise if it's false, reserve space necessary for
4382  * removing a chunk.
4383  */
4384 void check_system_chunk(struct btrfs_trans_handle *trans,
4385                         struct btrfs_root *root,
4386                         u64 type)
4387 {
4388         struct btrfs_space_info *info;
4389         u64 left;
4390         u64 thresh;
4391         int ret = 0;
4392         u64 num_devs;
4393
4394         /*
4395          * Needed because we can end up allocating a system chunk and for an
4396          * atomic and race free space reservation in the chunk block reserve.
4397          */
4398         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4399
4400         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4401         spin_lock(&info->lock);
4402         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4403                 info->bytes_reserved - info->bytes_readonly -
4404                 info->bytes_may_use;
4405         spin_unlock(&info->lock);
4406
4407         num_devs = get_profile_num_devs(root, type);
4408
4409         /* num_devs device items to update and 1 chunk item to add or remove */
4410         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4411                 btrfs_calc_trans_metadata_size(root, 1);
4412
4413         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4414                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4415                         left, thresh, type);
4416                 dump_space_info(info, 0, 0);
4417         }
4418
4419         if (left < thresh) {
4420                 u64 flags;
4421
4422                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4423                 /*
4424                  * Ignore failure to create system chunk. We might end up not
4425                  * needing it, as we might not need to COW all nodes/leafs from
4426                  * the paths we visit in the chunk tree (they were already COWed
4427                  * or created in the current transaction for example).
4428                  */
4429                 ret = btrfs_alloc_chunk(trans, root, flags);
4430         }
4431
4432         if (!ret) {
4433                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4434                                           &root->fs_info->chunk_block_rsv,
4435                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4436                 if (!ret)
4437                         trans->chunk_bytes_reserved += thresh;
4438         }
4439 }
4440
4441 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4442                           struct btrfs_root *extent_root, u64 flags, int force)
4443 {
4444         struct btrfs_space_info *space_info;
4445         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4446         int wait_for_alloc = 0;
4447         int ret = 0;
4448
4449         /* Don't re-enter if we're already allocating a chunk */
4450         if (trans->allocating_chunk)
4451                 return -ENOSPC;
4452
4453         space_info = __find_space_info(extent_root->fs_info, flags);
4454         if (!space_info) {
4455                 ret = update_space_info(extent_root->fs_info, flags,
4456                                         0, 0, &space_info);
4457                 BUG_ON(ret); /* -ENOMEM */
4458         }
4459         BUG_ON(!space_info); /* Logic error */
4460
4461 again:
4462         spin_lock(&space_info->lock);
4463         if (force < space_info->force_alloc)
4464                 force = space_info->force_alloc;
4465         if (space_info->full) {
4466                 if (should_alloc_chunk(extent_root, space_info, force))
4467                         ret = -ENOSPC;
4468                 else
4469                         ret = 0;
4470                 spin_unlock(&space_info->lock);
4471                 return ret;
4472         }
4473
4474         if (!should_alloc_chunk(extent_root, space_info, force)) {
4475                 spin_unlock(&space_info->lock);
4476                 return 0;
4477         } else if (space_info->chunk_alloc) {
4478                 wait_for_alloc = 1;
4479         } else {
4480                 space_info->chunk_alloc = 1;
4481         }
4482
4483         spin_unlock(&space_info->lock);
4484
4485         mutex_lock(&fs_info->chunk_mutex);
4486
4487         /*
4488          * The chunk_mutex is held throughout the entirety of a chunk
4489          * allocation, so once we've acquired the chunk_mutex we know that the
4490          * other guy is done and we need to recheck and see if we should
4491          * allocate.
4492          */
4493         if (wait_for_alloc) {
4494                 mutex_unlock(&fs_info->chunk_mutex);
4495                 wait_for_alloc = 0;
4496                 goto again;
4497         }
4498
4499         trans->allocating_chunk = true;
4500
4501         /*
4502          * If we have mixed data/metadata chunks we want to make sure we keep
4503          * allocating mixed chunks instead of individual chunks.
4504          */
4505         if (btrfs_mixed_space_info(space_info))
4506                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4507
4508         /*
4509          * if we're doing a data chunk, go ahead and make sure that
4510          * we keep a reasonable number of metadata chunks allocated in the
4511          * FS as well.
4512          */
4513         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4514                 fs_info->data_chunk_allocations++;
4515                 if (!(fs_info->data_chunk_allocations %
4516                       fs_info->metadata_ratio))
4517                         force_metadata_allocation(fs_info);
4518         }
4519
4520         /*
4521          * Check if we have enough space in SYSTEM chunk because we may need
4522          * to update devices.
4523          */
4524         check_system_chunk(trans, extent_root, flags);
4525
4526         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4527         trans->allocating_chunk = false;
4528
4529         spin_lock(&space_info->lock);
4530         if (ret < 0 && ret != -ENOSPC)
4531                 goto out;
4532         if (ret)
4533                 space_info->full = 1;
4534         else
4535                 ret = 1;
4536
4537         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4538 out:
4539         space_info->chunk_alloc = 0;
4540         spin_unlock(&space_info->lock);
4541         mutex_unlock(&fs_info->chunk_mutex);
4542         /*
4543          * When we allocate a new chunk we reserve space in the chunk block
4544          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4545          * add new nodes/leafs to it if we end up needing to do it when
4546          * inserting the chunk item and updating device items as part of the
4547          * second phase of chunk allocation, performed by
4548          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4549          * large number of new block groups to create in our transaction
4550          * handle's new_bgs list to avoid exhausting the chunk block reserve
4551          * in extreme cases - like having a single transaction create many new
4552          * block groups when starting to write out the free space caches of all
4553          * the block groups that were made dirty during the lifetime of the
4554          * transaction.
4555          */
4556         if (trans->can_flush_pending_bgs &&
4557             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4558                 btrfs_create_pending_block_groups(trans, trans->root);
4559                 btrfs_trans_release_chunk_metadata(trans);
4560         }
4561         return ret;
4562 }
4563
4564 static int can_overcommit(struct btrfs_root *root,
4565                           struct btrfs_space_info *space_info, u64 bytes,
4566                           enum btrfs_reserve_flush_enum flush)
4567 {
4568         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4569         u64 profile = btrfs_get_alloc_profile(root, 0);
4570         u64 space_size;
4571         u64 avail;
4572         u64 used;
4573
4574         used = space_info->bytes_used + space_info->bytes_reserved +
4575                 space_info->bytes_pinned + space_info->bytes_readonly;
4576
4577         /*
4578          * We only want to allow over committing if we have lots of actual space
4579          * free, but if we don't have enough space to handle the global reserve
4580          * space then we could end up having a real enospc problem when trying
4581          * to allocate a chunk or some other such important allocation.
4582          */
4583         spin_lock(&global_rsv->lock);
4584         space_size = calc_global_rsv_need_space(global_rsv);
4585         spin_unlock(&global_rsv->lock);
4586         if (used + space_size >= space_info->total_bytes)
4587                 return 0;
4588
4589         used += space_info->bytes_may_use;
4590
4591         spin_lock(&root->fs_info->free_chunk_lock);
4592         avail = root->fs_info->free_chunk_space;
4593         spin_unlock(&root->fs_info->free_chunk_lock);
4594
4595         /*
4596          * If we have dup, raid1 or raid10 then only half of the free
4597          * space is actually useable.  For raid56, the space info used
4598          * doesn't include the parity drive, so we don't have to
4599          * change the math
4600          */
4601         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4602                        BTRFS_BLOCK_GROUP_RAID1 |
4603                        BTRFS_BLOCK_GROUP_RAID10))
4604                 avail >>= 1;
4605
4606         /*
4607          * If we aren't flushing all things, let us overcommit up to
4608          * 1/2th of the space. If we can flush, don't let us overcommit
4609          * too much, let it overcommit up to 1/8 of the space.
4610          */
4611         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4612                 avail >>= 3;
4613         else
4614                 avail >>= 1;
4615
4616         if (used + bytes < space_info->total_bytes + avail)
4617                 return 1;
4618         return 0;
4619 }
4620
4621 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4622                                          unsigned long nr_pages, int nr_items)
4623 {
4624         struct super_block *sb = root->fs_info->sb;
4625
4626         if (down_read_trylock(&sb->s_umount)) {
4627                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4628                 up_read(&sb->s_umount);
4629         } else {
4630                 /*
4631                  * We needn't worry the filesystem going from r/w to r/o though
4632                  * we don't acquire ->s_umount mutex, because the filesystem
4633                  * should guarantee the delalloc inodes list be empty after
4634                  * the filesystem is readonly(all dirty pages are written to
4635                  * the disk).
4636                  */
4637                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4638                 if (!current->journal_info)
4639                         btrfs_wait_ordered_roots(root->fs_info, nr_items,
4640                                                  0, (u64)-1);
4641         }
4642 }
4643
4644 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4645 {
4646         u64 bytes;
4647         int nr;
4648
4649         bytes = btrfs_calc_trans_metadata_size(root, 1);
4650         nr = (int)div64_u64(to_reclaim, bytes);
4651         if (!nr)
4652                 nr = 1;
4653         return nr;
4654 }
4655
4656 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4657
4658 /*
4659  * shrink metadata reservation for delalloc
4660  */
4661 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4662                             bool wait_ordered)
4663 {
4664         struct btrfs_block_rsv *block_rsv;
4665         struct btrfs_space_info *space_info;
4666         struct btrfs_trans_handle *trans;
4667         u64 delalloc_bytes;
4668         u64 max_reclaim;
4669         long time_left;
4670         unsigned long nr_pages;
4671         int loops;
4672         int items;
4673         enum btrfs_reserve_flush_enum flush;
4674
4675         /* Calc the number of the pages we need flush for space reservation */
4676         items = calc_reclaim_items_nr(root, to_reclaim);
4677         to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
4678
4679         trans = (struct btrfs_trans_handle *)current->journal_info;
4680         block_rsv = &root->fs_info->delalloc_block_rsv;
4681         space_info = block_rsv->space_info;
4682
4683         delalloc_bytes = percpu_counter_sum_positive(
4684                                                 &root->fs_info->delalloc_bytes);
4685         if (delalloc_bytes == 0) {
4686                 if (trans)
4687                         return;
4688                 if (wait_ordered)
4689                         btrfs_wait_ordered_roots(root->fs_info, items,
4690                                                  0, (u64)-1);
4691                 return;
4692         }
4693
4694         loops = 0;
4695         while (delalloc_bytes && loops < 3) {
4696                 max_reclaim = min(delalloc_bytes, to_reclaim);
4697                 nr_pages = max_reclaim >> PAGE_SHIFT;
4698                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4699                 /*
4700                  * We need to wait for the async pages to actually start before
4701                  * we do anything.
4702                  */
4703                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4704                 if (!max_reclaim)
4705                         goto skip_async;
4706
4707                 if (max_reclaim <= nr_pages)
4708                         max_reclaim = 0;
4709                 else
4710                         max_reclaim -= nr_pages;
4711
4712                 wait_event(root->fs_info->async_submit_wait,
4713                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4714                            (int)max_reclaim);
4715 skip_async:
4716                 if (!trans)
4717                         flush = BTRFS_RESERVE_FLUSH_ALL;
4718                 else
4719                         flush = BTRFS_RESERVE_NO_FLUSH;
4720                 spin_lock(&space_info->lock);
4721                 if (can_overcommit(root, space_info, orig, flush)) {
4722                         spin_unlock(&space_info->lock);
4723                         break;
4724                 }
4725                 spin_unlock(&space_info->lock);
4726
4727                 loops++;
4728                 if (wait_ordered && !trans) {
4729                         btrfs_wait_ordered_roots(root->fs_info, items,
4730                                                  0, (u64)-1);
4731                 } else {
4732                         time_left = schedule_timeout_killable(1);
4733                         if (time_left)
4734                                 break;
4735                 }
4736                 delalloc_bytes = percpu_counter_sum_positive(
4737                                                 &root->fs_info->delalloc_bytes);
4738         }
4739 }
4740
4741 /**
4742  * maybe_commit_transaction - possibly commit the transaction if its ok to
4743  * @root - the root we're allocating for
4744  * @bytes - the number of bytes we want to reserve
4745  * @force - force the commit
4746  *
4747  * This will check to make sure that committing the transaction will actually
4748  * get us somewhere and then commit the transaction if it does.  Otherwise it
4749  * will return -ENOSPC.
4750  */
4751 static int may_commit_transaction(struct btrfs_root *root,
4752                                   struct btrfs_space_info *space_info,
4753                                   u64 bytes, int force)
4754 {
4755         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4756         struct btrfs_trans_handle *trans;
4757
4758         trans = (struct btrfs_trans_handle *)current->journal_info;
4759         if (trans)
4760                 return -EAGAIN;
4761
4762         if (force)
4763                 goto commit;
4764
4765         /* See if there is enough pinned space to make this reservation */
4766         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4767                                    bytes) >= 0)
4768                 goto commit;
4769
4770         /*
4771          * See if there is some space in the delayed insertion reservation for
4772          * this reservation.
4773          */
4774         if (space_info != delayed_rsv->space_info)
4775                 return -ENOSPC;
4776
4777         spin_lock(&delayed_rsv->lock);
4778         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4779                                    bytes - delayed_rsv->size) >= 0) {
4780                 spin_unlock(&delayed_rsv->lock);
4781                 return -ENOSPC;
4782         }
4783         spin_unlock(&delayed_rsv->lock);
4784
4785 commit:
4786         trans = btrfs_join_transaction(root);
4787         if (IS_ERR(trans))
4788                 return -ENOSPC;
4789
4790         return btrfs_commit_transaction(trans, root);
4791 }
4792
4793 enum flush_state {
4794         FLUSH_DELAYED_ITEMS_NR  =       1,
4795         FLUSH_DELAYED_ITEMS     =       2,
4796         FLUSH_DELALLOC          =       3,
4797         FLUSH_DELALLOC_WAIT     =       4,
4798         ALLOC_CHUNK             =       5,
4799         COMMIT_TRANS            =       6,
4800 };
4801
4802 static int flush_space(struct btrfs_root *root,
4803                        struct btrfs_space_info *space_info, u64 num_bytes,
4804                        u64 orig_bytes, int state)
4805 {
4806         struct btrfs_trans_handle *trans;
4807         int nr;
4808         int ret = 0;
4809
4810         switch (state) {
4811         case FLUSH_DELAYED_ITEMS_NR:
4812         case FLUSH_DELAYED_ITEMS:
4813                 if (state == FLUSH_DELAYED_ITEMS_NR)
4814                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4815                 else
4816                         nr = -1;
4817
4818                 trans = btrfs_join_transaction(root);
4819                 if (IS_ERR(trans)) {
4820                         ret = PTR_ERR(trans);
4821                         break;
4822                 }
4823                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4824                 btrfs_end_transaction(trans, root);
4825                 break;
4826         case FLUSH_DELALLOC:
4827         case FLUSH_DELALLOC_WAIT:
4828                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4829                                 state == FLUSH_DELALLOC_WAIT);
4830                 break;
4831         case ALLOC_CHUNK:
4832                 trans = btrfs_join_transaction(root);
4833                 if (IS_ERR(trans)) {
4834                         ret = PTR_ERR(trans);
4835                         break;
4836                 }
4837                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4838                                      btrfs_get_alloc_profile(root, 0),
4839                                      CHUNK_ALLOC_NO_FORCE);
4840                 btrfs_end_transaction(trans, root);
4841                 if (ret == -ENOSPC)
4842                         ret = 0;
4843                 break;
4844         case COMMIT_TRANS:
4845                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4846                 break;
4847         default:
4848                 ret = -ENOSPC;
4849                 break;
4850         }
4851
4852         return ret;
4853 }
4854
4855 static inline u64
4856 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4857                                  struct btrfs_space_info *space_info)
4858 {
4859         u64 used;
4860         u64 expected;
4861         u64 to_reclaim;
4862
4863         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4864         spin_lock(&space_info->lock);
4865         if (can_overcommit(root, space_info, to_reclaim,
4866                            BTRFS_RESERVE_FLUSH_ALL)) {
4867                 to_reclaim = 0;
4868                 goto out;
4869         }
4870
4871         used = space_info->bytes_used + space_info->bytes_reserved +
4872                space_info->bytes_pinned + space_info->bytes_readonly +
4873                space_info->bytes_may_use;
4874         if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4875                 expected = div_factor_fine(space_info->total_bytes, 95);
4876         else
4877                 expected = div_factor_fine(space_info->total_bytes, 90);
4878
4879         if (used > expected)
4880                 to_reclaim = used - expected;
4881         else
4882                 to_reclaim = 0;
4883         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4884                                      space_info->bytes_reserved);
4885 out:
4886         spin_unlock(&space_info->lock);
4887
4888         return to_reclaim;
4889 }
4890
4891 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4892                                         struct btrfs_fs_info *fs_info, u64 used)
4893 {
4894         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4895
4896         /* If we're just plain full then async reclaim just slows us down. */
4897         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4898                 return 0;
4899
4900         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4901                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4902 }
4903
4904 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4905                                        struct btrfs_fs_info *fs_info,
4906                                        int flush_state)
4907 {
4908         u64 used;
4909
4910         spin_lock(&space_info->lock);
4911         /*
4912          * We run out of space and have not got any free space via flush_space,
4913          * so don't bother doing async reclaim.
4914          */
4915         if (flush_state > COMMIT_TRANS && space_info->full) {
4916                 spin_unlock(&space_info->lock);
4917                 return 0;
4918         }
4919
4920         used = space_info->bytes_used + space_info->bytes_reserved +
4921                space_info->bytes_pinned + space_info->bytes_readonly +
4922                space_info->bytes_may_use;
4923         if (need_do_async_reclaim(space_info, fs_info, used)) {
4924                 spin_unlock(&space_info->lock);
4925                 return 1;
4926         }
4927         spin_unlock(&space_info->lock);
4928
4929         return 0;
4930 }
4931
4932 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4933 {
4934         struct btrfs_fs_info *fs_info;
4935         struct btrfs_space_info *space_info;
4936         u64 to_reclaim;
4937         int flush_state;
4938
4939         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4940         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4941
4942         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4943                                                       space_info);
4944         if (!to_reclaim)
4945                 return;
4946
4947         flush_state = FLUSH_DELAYED_ITEMS_NR;
4948         do {
4949                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4950                             to_reclaim, flush_state);
4951                 flush_state++;
4952                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4953                                                  flush_state))
4954                         return;
4955         } while (flush_state < COMMIT_TRANS);
4956 }
4957
4958 void btrfs_init_async_reclaim_work(struct work_struct *work)
4959 {
4960         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4961 }
4962
4963 /**
4964  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4965  * @root - the root we're allocating for
4966  * @block_rsv - the block_rsv we're allocating for
4967  * @orig_bytes - the number of bytes we want
4968  * @flush - whether or not we can flush to make our reservation
4969  *
4970  * This will reserve orig_bytes number of bytes from the space info associated
4971  * with the block_rsv.  If there is not enough space it will make an attempt to
4972  * flush out space to make room.  It will do this by flushing delalloc if
4973  * possible or committing the transaction.  If flush is 0 then no attempts to
4974  * regain reservations will be made and this will fail if there is not enough
4975  * space already.
4976  */
4977 static int reserve_metadata_bytes(struct btrfs_root *root,
4978                                   struct btrfs_block_rsv *block_rsv,
4979                                   u64 orig_bytes,
4980                                   enum btrfs_reserve_flush_enum flush)
4981 {
4982         struct btrfs_space_info *space_info = block_rsv->space_info;
4983         u64 used;
4984         u64 num_bytes = orig_bytes;
4985         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4986         int ret = 0;
4987         bool flushing = false;
4988
4989 again:
4990         ret = 0;
4991         spin_lock(&space_info->lock);
4992         /*
4993          * We only want to wait if somebody other than us is flushing and we
4994          * are actually allowed to flush all things.
4995          */
4996         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4997                space_info->flush) {
4998                 spin_unlock(&space_info->lock);
4999                 /*
5000                  * If we have a trans handle we can't wait because the flusher
5001                  * may have to commit the transaction, which would mean we would
5002                  * deadlock since we are waiting for the flusher to finish, but
5003                  * hold the current transaction open.
5004                  */
5005                 if (current->journal_info)
5006                         return -EAGAIN;
5007                 ret = wait_event_killable(space_info->wait, !space_info->flush);
5008                 /* Must have been killed, return */
5009                 if (ret)
5010                         return -EINTR;
5011
5012                 spin_lock(&space_info->lock);
5013         }
5014
5015         ret = -ENOSPC;
5016         used = space_info->bytes_used + space_info->bytes_reserved +
5017                 space_info->bytes_pinned + space_info->bytes_readonly +
5018                 space_info->bytes_may_use;
5019
5020         /*
5021          * The idea here is that we've not already over-reserved the block group
5022          * then we can go ahead and save our reservation first and then start
5023          * flushing if we need to.  Otherwise if we've already overcommitted
5024          * lets start flushing stuff first and then come back and try to make
5025          * our reservation.
5026          */
5027         if (used <= space_info->total_bytes) {
5028                 if (used + orig_bytes <= space_info->total_bytes) {
5029                         space_info->bytes_may_use += orig_bytes;
5030                         trace_btrfs_space_reservation(root->fs_info,
5031                                 "space_info", space_info->flags, orig_bytes, 1);
5032                         ret = 0;
5033                 } else {
5034                         /*
5035                          * Ok set num_bytes to orig_bytes since we aren't
5036                          * overocmmitted, this way we only try and reclaim what
5037                          * we need.
5038                          */
5039                         num_bytes = orig_bytes;
5040                 }
5041         } else {
5042                 /*
5043                  * Ok we're over committed, set num_bytes to the overcommitted
5044                  * amount plus the amount of bytes that we need for this
5045                  * reservation.
5046                  */
5047                 num_bytes = used - space_info->total_bytes +
5048                         (orig_bytes * 2);
5049         }
5050
5051         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
5052                 space_info->bytes_may_use += orig_bytes;
5053                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5054                                               space_info->flags, orig_bytes,
5055                                               1);
5056                 ret = 0;
5057         }
5058
5059         /*
5060          * Couldn't make our reservation, save our place so while we're trying
5061          * to reclaim space we can actually use it instead of somebody else
5062          * stealing it from us.
5063          *
5064          * We make the other tasks wait for the flush only when we can flush
5065          * all things.
5066          */
5067         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5068                 flushing = true;
5069                 space_info->flush = 1;
5070         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5071                 used += orig_bytes;
5072                 /*
5073                  * We will do the space reservation dance during log replay,
5074                  * which means we won't have fs_info->fs_root set, so don't do
5075                  * the async reclaim as we will panic.
5076                  */
5077                 if (!root->fs_info->log_root_recovering &&
5078                     need_do_async_reclaim(space_info, root->fs_info, used) &&
5079                     !work_busy(&root->fs_info->async_reclaim_work))
5080                         queue_work(system_unbound_wq,
5081                                    &root->fs_info->async_reclaim_work);
5082         }
5083         spin_unlock(&space_info->lock);
5084
5085         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5086                 goto out;
5087
5088         ret = flush_space(root, space_info, num_bytes, orig_bytes,
5089                           flush_state);
5090         flush_state++;
5091
5092         /*
5093          * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
5094          * would happen. So skip delalloc flush.
5095          */
5096         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5097             (flush_state == FLUSH_DELALLOC ||
5098              flush_state == FLUSH_DELALLOC_WAIT))
5099                 flush_state = ALLOC_CHUNK;
5100
5101         if (!ret)
5102                 goto again;
5103         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5104                  flush_state < COMMIT_TRANS)
5105                 goto again;
5106         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
5107                  flush_state <= COMMIT_TRANS)
5108                 goto again;
5109
5110 out:
5111         if (ret == -ENOSPC &&
5112             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5113                 struct btrfs_block_rsv *global_rsv =
5114                         &root->fs_info->global_block_rsv;
5115
5116                 if (block_rsv != global_rsv &&
5117                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5118                         ret = 0;
5119         }
5120         if (ret == -ENOSPC)
5121                 trace_btrfs_space_reservation(root->fs_info,
5122                                               "space_info:enospc",
5123                                               space_info->flags, orig_bytes, 1);
5124         if (flushing) {
5125                 spin_lock(&space_info->lock);
5126                 space_info->flush = 0;
5127                 wake_up_all(&space_info->wait);
5128                 spin_unlock(&space_info->lock);
5129         }
5130         return ret;
5131 }
5132
5133 static struct btrfs_block_rsv *get_block_rsv(
5134                                         const struct btrfs_trans_handle *trans,
5135                                         const struct btrfs_root *root)
5136 {
5137         struct btrfs_block_rsv *block_rsv = NULL;
5138
5139         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5140             (root == root->fs_info->csum_root && trans->adding_csums) ||
5141              (root == root->fs_info->uuid_root))
5142                 block_rsv = trans->block_rsv;
5143
5144         if (!block_rsv)
5145                 block_rsv = root->block_rsv;
5146
5147         if (!block_rsv)
5148                 block_rsv = &root->fs_info->empty_block_rsv;
5149
5150         return block_rsv;
5151 }
5152
5153 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5154                                u64 num_bytes)
5155 {
5156         int ret = -ENOSPC;
5157         spin_lock(&block_rsv->lock);
5158         if (block_rsv->reserved >= num_bytes) {
5159                 block_rsv->reserved -= num_bytes;
5160                 if (block_rsv->reserved < block_rsv->size)
5161                         block_rsv->full = 0;
5162                 ret = 0;
5163         }
5164         spin_unlock(&block_rsv->lock);
5165         return ret;
5166 }
5167
5168 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5169                                 u64 num_bytes, int update_size)
5170 {
5171         spin_lock(&block_rsv->lock);
5172         block_rsv->reserved += num_bytes;
5173         if (update_size)
5174                 block_rsv->size += num_bytes;
5175         else if (block_rsv->reserved >= block_rsv->size)
5176                 block_rsv->full = 1;
5177         spin_unlock(&block_rsv->lock);
5178 }
5179
5180 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5181                              struct btrfs_block_rsv *dest, u64 num_bytes,
5182                              int min_factor)
5183 {
5184         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5185         u64 min_bytes;
5186
5187         if (global_rsv->space_info != dest->space_info)
5188                 return -ENOSPC;
5189
5190         spin_lock(&global_rsv->lock);
5191         min_bytes = div_factor(global_rsv->size, min_factor);
5192         if (global_rsv->reserved < min_bytes + num_bytes) {
5193                 spin_unlock(&global_rsv->lock);
5194                 return -ENOSPC;
5195         }
5196         global_rsv->reserved -= num_bytes;
5197         if (global_rsv->reserved < global_rsv->size)
5198                 global_rsv->full = 0;
5199         spin_unlock(&global_rsv->lock);
5200
5201         block_rsv_add_bytes(dest, num_bytes, 1);
5202         return 0;
5203 }
5204
5205 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5206                                     struct btrfs_block_rsv *block_rsv,
5207                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5208 {
5209         struct btrfs_space_info *space_info = block_rsv->space_info;
5210
5211         spin_lock(&block_rsv->lock);
5212         if (num_bytes == (u64)-1)
5213                 num_bytes = block_rsv->size;
5214         block_rsv->size -= num_bytes;
5215         if (block_rsv->reserved >= block_rsv->size) {
5216                 num_bytes = block_rsv->reserved - block_rsv->size;
5217                 block_rsv->reserved = block_rsv->size;
5218                 block_rsv->full = 1;
5219         } else {
5220                 num_bytes = 0;
5221         }
5222         spin_unlock(&block_rsv->lock);
5223
5224         if (num_bytes > 0) {
5225                 if (dest) {
5226                         spin_lock(&dest->lock);
5227                         if (!dest->full) {
5228                                 u64 bytes_to_add;
5229
5230                                 bytes_to_add = dest->size - dest->reserved;
5231                                 bytes_to_add = min(num_bytes, bytes_to_add);
5232                                 dest->reserved += bytes_to_add;
5233                                 if (dest->reserved >= dest->size)
5234                                         dest->full = 1;
5235                                 num_bytes -= bytes_to_add;
5236                         }
5237                         spin_unlock(&dest->lock);
5238                 }
5239                 if (num_bytes) {
5240                         spin_lock(&space_info->lock);
5241                         space_info->bytes_may_use -= num_bytes;
5242                         trace_btrfs_space_reservation(fs_info, "space_info",
5243                                         space_info->flags, num_bytes, 0);
5244                         spin_unlock(&space_info->lock);
5245                 }
5246         }
5247 }
5248
5249 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5250                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5251 {
5252         int ret;
5253
5254         ret = block_rsv_use_bytes(src, num_bytes);
5255         if (ret)
5256                 return ret;
5257
5258         block_rsv_add_bytes(dst, num_bytes, 1);
5259         return 0;
5260 }
5261
5262 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5263 {
5264         memset(rsv, 0, sizeof(*rsv));
5265         spin_lock_init(&rsv->lock);
5266         rsv->type = type;
5267 }
5268
5269 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5270                                               unsigned short type)
5271 {
5272         struct btrfs_block_rsv *block_rsv;
5273         struct btrfs_fs_info *fs_info = root->fs_info;
5274
5275         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5276         if (!block_rsv)
5277                 return NULL;
5278
5279         btrfs_init_block_rsv(block_rsv, type);
5280         block_rsv->space_info = __find_space_info(fs_info,
5281                                                   BTRFS_BLOCK_GROUP_METADATA);
5282         return block_rsv;
5283 }
5284
5285 void btrfs_free_block_rsv(struct btrfs_root *root,
5286                           struct btrfs_block_rsv *rsv)
5287 {
5288         if (!rsv)
5289                 return;
5290         btrfs_block_rsv_release(root, rsv, (u64)-1);
5291         kfree(rsv);
5292 }
5293
5294 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5295 {
5296         kfree(rsv);
5297 }
5298
5299 int btrfs_block_rsv_add(struct btrfs_root *root,
5300                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5301                         enum btrfs_reserve_flush_enum flush)
5302 {
5303         int ret;
5304
5305         if (num_bytes == 0)
5306                 return 0;
5307
5308         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5309         if (!ret) {
5310                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5311                 return 0;
5312         }
5313
5314         return ret;
5315 }
5316
5317 int btrfs_block_rsv_check(struct btrfs_root *root,
5318                           struct btrfs_block_rsv *block_rsv, int min_factor)
5319 {
5320         u64 num_bytes = 0;
5321         int ret = -ENOSPC;
5322
5323         if (!block_rsv)
5324                 return 0;
5325
5326         spin_lock(&block_rsv->lock);
5327         num_bytes = div_factor(block_rsv->size, min_factor);
5328         if (block_rsv->reserved >= num_bytes)
5329                 ret = 0;
5330         spin_unlock(&block_rsv->lock);
5331
5332         return ret;
5333 }
5334
5335 int btrfs_block_rsv_refill(struct btrfs_root *root,
5336                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5337                            enum btrfs_reserve_flush_enum flush)
5338 {
5339         u64 num_bytes = 0;
5340         int ret = -ENOSPC;
5341
5342         if (!block_rsv)
5343                 return 0;
5344
5345         spin_lock(&block_rsv->lock);
5346         num_bytes = min_reserved;
5347         if (block_rsv->reserved >= num_bytes)
5348                 ret = 0;
5349         else
5350                 num_bytes -= block_rsv->reserved;
5351         spin_unlock(&block_rsv->lock);
5352
5353         if (!ret)
5354                 return 0;
5355
5356         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5357         if (!ret) {
5358                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5359                 return 0;
5360         }
5361
5362         return ret;
5363 }
5364
5365 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5366                             struct btrfs_block_rsv *dst_rsv,
5367                             u64 num_bytes)
5368 {
5369         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5370 }
5371
5372 void btrfs_block_rsv_release(struct btrfs_root *root,
5373                              struct btrfs_block_rsv *block_rsv,
5374                              u64 num_bytes)
5375 {
5376         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5377         if (global_rsv == block_rsv ||
5378             block_rsv->space_info != global_rsv->space_info)
5379                 global_rsv = NULL;
5380         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5381                                 num_bytes);
5382 }
5383
5384 /*
5385  * helper to calculate size of global block reservation.
5386  * the desired value is sum of space used by extent tree,
5387  * checksum tree and root tree
5388  */
5389 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5390 {
5391         struct btrfs_space_info *sinfo;
5392         u64 num_bytes;
5393         u64 meta_used;
5394         u64 data_used;
5395         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5396
5397         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5398         spin_lock(&sinfo->lock);
5399         data_used = sinfo->bytes_used;
5400         spin_unlock(&sinfo->lock);
5401
5402         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5403         spin_lock(&sinfo->lock);
5404         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5405                 data_used = 0;
5406         meta_used = sinfo->bytes_used;
5407         spin_unlock(&sinfo->lock);
5408
5409         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5410                     csum_size * 2;
5411         num_bytes += div_u64(data_used + meta_used, 50);
5412
5413         if (num_bytes * 3 > meta_used)
5414                 num_bytes = div_u64(meta_used, 3);
5415
5416         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5417 }
5418
5419 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5420 {
5421         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5422         struct btrfs_space_info *sinfo = block_rsv->space_info;
5423         u64 num_bytes;
5424
5425         num_bytes = calc_global_metadata_size(fs_info);
5426
5427         spin_lock(&sinfo->lock);
5428         spin_lock(&block_rsv->lock);
5429
5430         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5431
5432         if (block_rsv->reserved < block_rsv->size) {
5433                 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5434                         sinfo->bytes_reserved + sinfo->bytes_readonly +
5435                         sinfo->bytes_may_use;
5436                 if (sinfo->total_bytes > num_bytes) {
5437                         num_bytes = sinfo->total_bytes - num_bytes;
5438                         num_bytes = min(num_bytes,
5439                                         block_rsv->size - block_rsv->reserved);
5440                         block_rsv->reserved += num_bytes;
5441                         sinfo->bytes_may_use += num_bytes;
5442                         trace_btrfs_space_reservation(fs_info, "space_info",
5443                                                       sinfo->flags, num_bytes,
5444                                                       1);
5445                 }
5446         } else if (block_rsv->reserved > block_rsv->size) {
5447                 num_bytes = block_rsv->reserved - block_rsv->size;
5448                 sinfo->bytes_may_use -= num_bytes;
5449                 trace_btrfs_space_reservation(fs_info, "space_info",
5450                                       sinfo->flags, num_bytes, 0);
5451                 block_rsv->reserved = block_rsv->size;
5452         }
5453
5454         if (block_rsv->reserved == block_rsv->size)
5455                 block_rsv->full = 1;
5456         else
5457                 block_rsv->full = 0;
5458
5459         spin_unlock(&block_rsv->lock);
5460         spin_unlock(&sinfo->lock);
5461 }
5462
5463 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5464 {
5465         struct btrfs_space_info *space_info;
5466
5467         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5468         fs_info->chunk_block_rsv.space_info = space_info;
5469
5470         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5471         fs_info->global_block_rsv.space_info = space_info;
5472         fs_info->delalloc_block_rsv.space_info = space_info;
5473         fs_info->trans_block_rsv.space_info = space_info;
5474         fs_info->empty_block_rsv.space_info = space_info;
5475         fs_info->delayed_block_rsv.space_info = space_info;
5476
5477         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5478         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5479         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5480         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5481         if (fs_info->quota_root)
5482                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5483         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5484
5485         update_global_block_rsv(fs_info);
5486 }
5487
5488 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5489 {
5490         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5491                                 (u64)-1);
5492         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5493         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5494         WARN_ON(fs_info->trans_block_rsv.size > 0);
5495         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5496         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5497         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5498         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5499         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5500 }
5501
5502 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5503                                   struct btrfs_root *root)
5504 {
5505         if (!trans->block_rsv)
5506                 return;
5507
5508         if (!trans->bytes_reserved)
5509                 return;
5510
5511         trace_btrfs_space_reservation(root->fs_info, "transaction",
5512                                       trans->transid, trans->bytes_reserved, 0);
5513         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5514         trans->bytes_reserved = 0;
5515 }
5516
5517 /*
5518  * To be called after all the new block groups attached to the transaction
5519  * handle have been created (btrfs_create_pending_block_groups()).
5520  */
5521 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5522 {
5523         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5524
5525         if (!trans->chunk_bytes_reserved)
5526                 return;
5527
5528         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5529
5530         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5531                                 trans->chunk_bytes_reserved);
5532         trans->chunk_bytes_reserved = 0;
5533 }
5534
5535 /* Can only return 0 or -ENOSPC */
5536 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5537                                   struct inode *inode)
5538 {
5539         struct btrfs_root *root = BTRFS_I(inode)->root;
5540         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5541         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5542
5543         /*
5544          * We need to hold space in order to delete our orphan item once we've
5545          * added it, so this takes the reservation so we can release it later
5546          * when we are truly done with the orphan item.
5547          */
5548         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5549         trace_btrfs_space_reservation(root->fs_info, "orphan",
5550                                       btrfs_ino(inode), num_bytes, 1);
5551         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5552 }
5553
5554 void btrfs_orphan_release_metadata(struct inode *inode)
5555 {
5556         struct btrfs_root *root = BTRFS_I(inode)->root;
5557         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5558         trace_btrfs_space_reservation(root->fs_info, "orphan",
5559                                       btrfs_ino(inode), num_bytes, 0);
5560         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5561 }
5562
5563 /*
5564  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5565  * root: the root of the parent directory
5566  * rsv: block reservation
5567  * items: the number of items that we need do reservation
5568  * qgroup_reserved: used to return the reserved size in qgroup
5569  *
5570  * This function is used to reserve the space for snapshot/subvolume
5571  * creation and deletion. Those operations are different with the
5572  * common file/directory operations, they change two fs/file trees
5573  * and root tree, the number of items that the qgroup reserves is
5574  * different with the free space reservation. So we can not use
5575  * the space reservation mechanism in start_transaction().
5576  */
5577 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5578                                      struct btrfs_block_rsv *rsv,
5579                                      int items,
5580                                      u64 *qgroup_reserved,
5581                                      bool use_global_rsv)
5582 {
5583         u64 num_bytes;
5584         int ret;
5585         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5586
5587         if (root->fs_info->quota_enabled) {
5588                 /* One for parent inode, two for dir entries */
5589                 num_bytes = 3 * root->nodesize;
5590                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5591                 if (ret)
5592                         return ret;
5593         } else {
5594                 num_bytes = 0;
5595         }
5596
5597         *qgroup_reserved = num_bytes;
5598
5599         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5600         rsv->space_info = __find_space_info(root->fs_info,
5601                                             BTRFS_BLOCK_GROUP_METADATA);
5602         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5603                                   BTRFS_RESERVE_FLUSH_ALL);
5604
5605         if (ret == -ENOSPC && use_global_rsv)
5606                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5607
5608         if (ret && *qgroup_reserved)
5609                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5610
5611         return ret;
5612 }
5613
5614 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5615                                       struct btrfs_block_rsv *rsv,
5616                                       u64 qgroup_reserved)
5617 {
5618         btrfs_block_rsv_release(root, rsv, (u64)-1);
5619 }
5620
5621 /**
5622  * drop_outstanding_extent - drop an outstanding extent
5623  * @inode: the inode we're dropping the extent for
5624  * @num_bytes: the number of bytes we're releasing.
5625  *
5626  * This is called when we are freeing up an outstanding extent, either called
5627  * after an error or after an extent is written.  This will return the number of
5628  * reserved extents that need to be freed.  This must be called with
5629  * BTRFS_I(inode)->lock held.
5630  */
5631 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5632 {
5633         unsigned drop_inode_space = 0;
5634         unsigned dropped_extents = 0;
5635         unsigned num_extents = 0;
5636
5637         num_extents = (unsigned)div64_u64(num_bytes +
5638                                           BTRFS_MAX_EXTENT_SIZE - 1,
5639                                           BTRFS_MAX_EXTENT_SIZE);
5640         ASSERT(num_extents);
5641         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5642         BTRFS_I(inode)->outstanding_extents -= num_extents;
5643
5644         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5645             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5646                                &BTRFS_I(inode)->runtime_flags))
5647                 drop_inode_space = 1;
5648
5649         /*
5650          * If we have more or the same amount of outstanding extents than we have
5651          * reserved then we need to leave the reserved extents count alone.
5652          */
5653         if (BTRFS_I(inode)->outstanding_extents >=
5654             BTRFS_I(inode)->reserved_extents)
5655                 return drop_inode_space;
5656
5657         dropped_extents = BTRFS_I(inode)->reserved_extents -
5658                 BTRFS_I(inode)->outstanding_extents;
5659         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5660         return dropped_extents + drop_inode_space;
5661 }
5662
5663 /**
5664  * calc_csum_metadata_size - return the amount of metadata space that must be
5665  *      reserved/freed for the given bytes.
5666  * @inode: the inode we're manipulating
5667  * @num_bytes: the number of bytes in question
5668  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5669  *
5670  * This adjusts the number of csum_bytes in the inode and then returns the
5671  * correct amount of metadata that must either be reserved or freed.  We
5672  * calculate how many checksums we can fit into one leaf and then divide the
5673  * number of bytes that will need to be checksumed by this value to figure out
5674  * how many checksums will be required.  If we are adding bytes then the number
5675  * may go up and we will return the number of additional bytes that must be
5676  * reserved.  If it is going down we will return the number of bytes that must
5677  * be freed.
5678  *
5679  * This must be called with BTRFS_I(inode)->lock held.
5680  */
5681 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5682                                    int reserve)
5683 {
5684         struct btrfs_root *root = BTRFS_I(inode)->root;
5685         u64 old_csums, num_csums;
5686
5687         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5688             BTRFS_I(inode)->csum_bytes == 0)
5689                 return 0;
5690
5691         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5692         if (reserve)
5693                 BTRFS_I(inode)->csum_bytes += num_bytes;
5694         else
5695                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5696         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5697
5698         /* No change, no need to reserve more */
5699         if (old_csums == num_csums)
5700                 return 0;
5701
5702         if (reserve)
5703                 return btrfs_calc_trans_metadata_size(root,
5704                                                       num_csums - old_csums);
5705
5706         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5707 }
5708
5709 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5710 {
5711         struct btrfs_root *root = BTRFS_I(inode)->root;
5712         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5713         u64 to_reserve = 0;
5714         u64 csum_bytes;
5715         unsigned nr_extents = 0;
5716         int extra_reserve = 0;
5717         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5718         int ret = 0;
5719         bool delalloc_lock = true;
5720         u64 to_free = 0;
5721         unsigned dropped;
5722
5723         /* If we are a free space inode we need to not flush since we will be in
5724          * the middle of a transaction commit.  We also don't need the delalloc
5725          * mutex since we won't race with anybody.  We need this mostly to make
5726          * lockdep shut its filthy mouth.
5727          */
5728         if (btrfs_is_free_space_inode(inode)) {
5729                 flush = BTRFS_RESERVE_NO_FLUSH;
5730                 delalloc_lock = false;
5731         }
5732
5733         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5734             btrfs_transaction_in_commit(root->fs_info))
5735                 schedule_timeout(1);
5736
5737         if (delalloc_lock)
5738                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5739
5740         num_bytes = ALIGN(num_bytes, root->sectorsize);
5741
5742         spin_lock(&BTRFS_I(inode)->lock);
5743         nr_extents = (unsigned)div64_u64(num_bytes +
5744                                          BTRFS_MAX_EXTENT_SIZE - 1,
5745                                          BTRFS_MAX_EXTENT_SIZE);
5746         BTRFS_I(inode)->outstanding_extents += nr_extents;
5747         nr_extents = 0;
5748
5749         if (BTRFS_I(inode)->outstanding_extents >
5750             BTRFS_I(inode)->reserved_extents)
5751                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5752                         BTRFS_I(inode)->reserved_extents;
5753
5754         /*
5755          * Add an item to reserve for updating the inode when we complete the
5756          * delalloc io.
5757          */
5758         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5759                       &BTRFS_I(inode)->runtime_flags)) {
5760                 nr_extents++;
5761                 extra_reserve = 1;
5762         }
5763
5764         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5765         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5766         csum_bytes = BTRFS_I(inode)->csum_bytes;
5767         spin_unlock(&BTRFS_I(inode)->lock);
5768
5769         if (root->fs_info->quota_enabled) {
5770                 ret = btrfs_qgroup_reserve_meta(root,
5771                                 nr_extents * root->nodesize);
5772                 if (ret)
5773                         goto out_fail;
5774         }
5775
5776         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5777         if (unlikely(ret)) {
5778                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5779                 goto out_fail;
5780         }
5781
5782         spin_lock(&BTRFS_I(inode)->lock);
5783         if (extra_reserve) {
5784                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5785                         &BTRFS_I(inode)->runtime_flags);
5786                 nr_extents--;
5787         }
5788         BTRFS_I(inode)->reserved_extents += nr_extents;
5789         spin_unlock(&BTRFS_I(inode)->lock);
5790
5791         if (delalloc_lock)
5792                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5793
5794         if (to_reserve)
5795                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5796                                               btrfs_ino(inode), to_reserve, 1);
5797         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5798
5799         return 0;
5800
5801 out_fail:
5802         spin_lock(&BTRFS_I(inode)->lock);
5803         dropped = drop_outstanding_extent(inode, num_bytes);
5804         /*
5805          * If the inodes csum_bytes is the same as the original
5806          * csum_bytes then we know we haven't raced with any free()ers
5807          * so we can just reduce our inodes csum bytes and carry on.
5808          */
5809         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5810                 calc_csum_metadata_size(inode, num_bytes, 0);
5811         } else {
5812                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5813                 u64 bytes;
5814
5815                 /*
5816                  * This is tricky, but first we need to figure out how much we
5817                  * freed from any free-ers that occurred during this
5818                  * reservation, so we reset ->csum_bytes to the csum_bytes
5819                  * before we dropped our lock, and then call the free for the
5820                  * number of bytes that were freed while we were trying our
5821                  * reservation.
5822                  */
5823                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5824                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5825                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5826
5827
5828                 /*
5829                  * Now we need to see how much we would have freed had we not
5830                  * been making this reservation and our ->csum_bytes were not
5831                  * artificially inflated.
5832                  */
5833                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5834                 bytes = csum_bytes - orig_csum_bytes;
5835                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5836
5837                 /*
5838                  * Now reset ->csum_bytes to what it should be.  If bytes is
5839                  * more than to_free then we would have freed more space had we
5840                  * not had an artificially high ->csum_bytes, so we need to free
5841                  * the remainder.  If bytes is the same or less then we don't
5842                  * need to do anything, the other free-ers did the correct
5843                  * thing.
5844                  */
5845                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5846                 if (bytes > to_free)
5847                         to_free = bytes - to_free;
5848                 else
5849                         to_free = 0;
5850         }
5851         spin_unlock(&BTRFS_I(inode)->lock);
5852         if (dropped)
5853                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5854
5855         if (to_free) {
5856                 btrfs_block_rsv_release(root, block_rsv, to_free);
5857                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5858                                               btrfs_ino(inode), to_free, 0);
5859         }
5860         if (delalloc_lock)
5861                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5862         return ret;
5863 }
5864
5865 /**
5866  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5867  * @inode: the inode to release the reservation for
5868  * @num_bytes: the number of bytes we're releasing
5869  *
5870  * This will release the metadata reservation for an inode.  This can be called
5871  * once we complete IO for a given set of bytes to release their metadata
5872  * reservations.
5873  */
5874 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5875 {
5876         struct btrfs_root *root = BTRFS_I(inode)->root;
5877         u64 to_free = 0;
5878         unsigned dropped;
5879
5880         num_bytes = ALIGN(num_bytes, root->sectorsize);
5881         spin_lock(&BTRFS_I(inode)->lock);
5882         dropped = drop_outstanding_extent(inode, num_bytes);
5883
5884         if (num_bytes)
5885                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5886         spin_unlock(&BTRFS_I(inode)->lock);
5887         if (dropped > 0)
5888                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5889
5890         if (btrfs_test_is_dummy_root(root))
5891                 return;
5892
5893         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5894                                       btrfs_ino(inode), to_free, 0);
5895
5896         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5897                                 to_free);
5898 }
5899
5900 /**
5901  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5902  * delalloc
5903  * @inode: inode we're writing to
5904  * @start: start range we are writing to
5905  * @len: how long the range we are writing to
5906  *
5907  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5908  *
5909  * This will do the following things
5910  *
5911  * o reserve space in data space info for num bytes
5912  *   and reserve precious corresponding qgroup space
5913  *   (Done in check_data_free_space)
5914  *
5915  * o reserve space for metadata space, based on the number of outstanding
5916  *   extents and how much csums will be needed
5917  *   also reserve metadata space in a per root over-reserve method.
5918  * o add to the inodes->delalloc_bytes
5919  * o add it to the fs_info's delalloc inodes list.
5920  *   (Above 3 all done in delalloc_reserve_metadata)
5921  *
5922  * Return 0 for success
5923  * Return <0 for error(-ENOSPC or -EQUOT)
5924  */
5925 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5926 {
5927         int ret;
5928
5929         ret = btrfs_check_data_free_space(inode, start, len);
5930         if (ret < 0)
5931                 return ret;
5932         ret = btrfs_delalloc_reserve_metadata(inode, len);
5933         if (ret < 0)
5934                 btrfs_free_reserved_data_space(inode, start, len);
5935         return ret;
5936 }
5937
5938 /**
5939  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5940  * @inode: inode we're releasing space for
5941  * @start: start position of the space already reserved
5942  * @len: the len of the space already reserved
5943  *
5944  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5945  * called in the case that we don't need the metadata AND data reservations
5946  * anymore.  So if there is an error or we insert an inline extent.
5947  *
5948  * This function will release the metadata space that was not used and will
5949  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5950  * list if there are no delalloc bytes left.
5951  * Also it will handle the qgroup reserved space.
5952  */
5953 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5954 {
5955         btrfs_delalloc_release_metadata(inode, len);
5956         btrfs_free_reserved_data_space(inode, start, len);
5957 }
5958
5959 static int update_block_group(struct btrfs_trans_handle *trans,
5960                               struct btrfs_root *root, u64 bytenr,
5961                               u64 num_bytes, int alloc)
5962 {
5963         struct btrfs_block_group_cache *cache = NULL;
5964         struct btrfs_fs_info *info = root->fs_info;
5965         u64 total = num_bytes;
5966         u64 old_val;
5967         u64 byte_in_group;
5968         int factor;
5969
5970         /* block accounting for super block */
5971         spin_lock(&info->delalloc_root_lock);
5972         old_val = btrfs_super_bytes_used(info->super_copy);
5973         if (alloc)
5974                 old_val += num_bytes;
5975         else
5976                 old_val -= num_bytes;
5977         btrfs_set_super_bytes_used(info->super_copy, old_val);
5978         spin_unlock(&info->delalloc_root_lock);
5979
5980         while (total) {
5981                 cache = btrfs_lookup_block_group(info, bytenr);
5982                 if (!cache)
5983                         return -ENOENT;
5984                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5985                                     BTRFS_BLOCK_GROUP_RAID1 |
5986                                     BTRFS_BLOCK_GROUP_RAID10))
5987                         factor = 2;
5988                 else
5989                         factor = 1;
5990                 /*
5991                  * If this block group has free space cache written out, we
5992                  * need to make sure to load it if we are removing space.  This
5993                  * is because we need the unpinning stage to actually add the
5994                  * space back to the block group, otherwise we will leak space.
5995                  */
5996                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5997                         cache_block_group(cache, 1);
5998
5999                 byte_in_group = bytenr - cache->key.objectid;
6000                 WARN_ON(byte_in_group > cache->key.offset);
6001
6002                 spin_lock(&cache->space_info->lock);
6003                 spin_lock(&cache->lock);
6004
6005                 if (btrfs_test_opt(root, SPACE_CACHE) &&
6006                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6007                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6008
6009                 old_val = btrfs_block_group_used(&cache->item);
6010                 num_bytes = min(total, cache->key.offset - byte_in_group);
6011                 if (alloc) {
6012                         old_val += num_bytes;
6013                         btrfs_set_block_group_used(&cache->item, old_val);
6014                         cache->reserved -= num_bytes;
6015                         cache->space_info->bytes_reserved -= num_bytes;
6016                         cache->space_info->bytes_used += num_bytes;
6017                         cache->space_info->disk_used += num_bytes * factor;
6018                         spin_unlock(&cache->lock);
6019                         spin_unlock(&cache->space_info->lock);
6020                 } else {
6021                         old_val -= num_bytes;
6022                         btrfs_set_block_group_used(&cache->item, old_val);
6023                         cache->pinned += num_bytes;
6024                         cache->space_info->bytes_pinned += num_bytes;
6025                         cache->space_info->bytes_used -= num_bytes;
6026                         cache->space_info->disk_used -= num_bytes * factor;
6027                         spin_unlock(&cache->lock);
6028                         spin_unlock(&cache->space_info->lock);
6029
6030                         set_extent_dirty(info->pinned_extents,
6031                                          bytenr, bytenr + num_bytes - 1,
6032                                          GFP_NOFS | __GFP_NOFAIL);
6033                 }
6034
6035                 spin_lock(&trans->transaction->dirty_bgs_lock);
6036                 if (list_empty(&cache->dirty_list)) {
6037                         list_add_tail(&cache->dirty_list,
6038                                       &trans->transaction->dirty_bgs);
6039                                 trans->transaction->num_dirty_bgs++;
6040                         btrfs_get_block_group(cache);
6041                 }
6042                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6043
6044                 /*
6045                  * No longer have used bytes in this block group, queue it for
6046                  * deletion. We do this after adding the block group to the
6047                  * dirty list to avoid races between cleaner kthread and space
6048                  * cache writeout.
6049                  */
6050                 if (!alloc && old_val == 0) {
6051                         spin_lock(&info->unused_bgs_lock);
6052                         if (list_empty(&cache->bg_list)) {
6053                                 btrfs_get_block_group(cache);
6054                                 list_add_tail(&cache->bg_list,
6055                                               &info->unused_bgs);
6056                         }
6057                         spin_unlock(&info->unused_bgs_lock);
6058                 }
6059
6060                 btrfs_put_block_group(cache);
6061                 total -= num_bytes;
6062                 bytenr += num_bytes;
6063         }
6064         return 0;
6065 }
6066
6067 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
6068 {
6069         struct btrfs_block_group_cache *cache;
6070         u64 bytenr;
6071
6072         spin_lock(&root->fs_info->block_group_cache_lock);
6073         bytenr = root->fs_info->first_logical_byte;
6074         spin_unlock(&root->fs_info->block_group_cache_lock);
6075
6076         if (bytenr < (u64)-1)
6077                 return bytenr;
6078
6079         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
6080         if (!cache)
6081                 return 0;
6082
6083         bytenr = cache->key.objectid;
6084         btrfs_put_block_group(cache);
6085
6086         return bytenr;
6087 }
6088
6089 static int pin_down_extent(struct btrfs_root *root,
6090                            struct btrfs_block_group_cache *cache,
6091                            u64 bytenr, u64 num_bytes, int reserved)
6092 {
6093         spin_lock(&cache->space_info->lock);
6094         spin_lock(&cache->lock);
6095         cache->pinned += num_bytes;
6096         cache->space_info->bytes_pinned += num_bytes;
6097         if (reserved) {
6098                 cache->reserved -= num_bytes;
6099                 cache->space_info->bytes_reserved -= num_bytes;
6100         }
6101         spin_unlock(&cache->lock);
6102         spin_unlock(&cache->space_info->lock);
6103
6104         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6105                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6106         if (reserved)
6107                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
6108         return 0;
6109 }
6110
6111 /*
6112  * this function must be called within transaction
6113  */
6114 int btrfs_pin_extent(struct btrfs_root *root,
6115                      u64 bytenr, u64 num_bytes, int reserved)
6116 {
6117         struct btrfs_block_group_cache *cache;
6118
6119         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6120         BUG_ON(!cache); /* Logic error */
6121
6122         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6123
6124         btrfs_put_block_group(cache);
6125         return 0;
6126 }
6127
6128 /*
6129  * this function must be called within transaction
6130  */
6131 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6132                                     u64 bytenr, u64 num_bytes)
6133 {
6134         struct btrfs_block_group_cache *cache;
6135         int ret;
6136
6137         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6138         if (!cache)
6139                 return -EINVAL;
6140
6141         /*
6142          * pull in the free space cache (if any) so that our pin
6143          * removes the free space from the cache.  We have load_only set
6144          * to one because the slow code to read in the free extents does check
6145          * the pinned extents.
6146          */
6147         cache_block_group(cache, 1);
6148
6149         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6150
6151         /* remove us from the free space cache (if we're there at all) */
6152         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6153         btrfs_put_block_group(cache);
6154         return ret;
6155 }
6156
6157 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6158 {
6159         int ret;
6160         struct btrfs_block_group_cache *block_group;
6161         struct btrfs_caching_control *caching_ctl;
6162
6163         block_group = btrfs_lookup_block_group(root->fs_info, start);
6164         if (!block_group)
6165                 return -EINVAL;
6166
6167         cache_block_group(block_group, 0);
6168         caching_ctl = get_caching_control(block_group);
6169
6170         if (!caching_ctl) {
6171                 /* Logic error */
6172                 BUG_ON(!block_group_cache_done(block_group));
6173                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6174         } else {
6175                 mutex_lock(&caching_ctl->mutex);
6176
6177                 if (start >= caching_ctl->progress) {
6178                         ret = add_excluded_extent(root, start, num_bytes);
6179                 } else if (start + num_bytes <= caching_ctl->progress) {
6180                         ret = btrfs_remove_free_space(block_group,
6181                                                       start, num_bytes);
6182                 } else {
6183                         num_bytes = caching_ctl->progress - start;
6184                         ret = btrfs_remove_free_space(block_group,
6185                                                       start, num_bytes);
6186                         if (ret)
6187                                 goto out_lock;
6188
6189                         num_bytes = (start + num_bytes) -
6190                                 caching_ctl->progress;
6191                         start = caching_ctl->progress;
6192                         ret = add_excluded_extent(root, start, num_bytes);
6193                 }
6194 out_lock:
6195                 mutex_unlock(&caching_ctl->mutex);
6196                 put_caching_control(caching_ctl);
6197         }
6198         btrfs_put_block_group(block_group);
6199         return ret;
6200 }
6201
6202 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6203                                  struct extent_buffer *eb)
6204 {
6205         struct btrfs_file_extent_item *item;
6206         struct btrfs_key key;
6207         int found_type;
6208         int i;
6209
6210         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6211                 return 0;
6212
6213         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6214                 btrfs_item_key_to_cpu(eb, &key, i);
6215                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6216                         continue;
6217                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6218                 found_type = btrfs_file_extent_type(eb, item);
6219                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6220                         continue;
6221                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6222                         continue;
6223                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6224                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6225                 __exclude_logged_extent(log, key.objectid, key.offset);
6226         }
6227
6228         return 0;
6229 }
6230
6231 static void
6232 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6233 {
6234         atomic_inc(&bg->reservations);
6235 }
6236
6237 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6238                                         const u64 start)
6239 {
6240         struct btrfs_block_group_cache *bg;
6241
6242         bg = btrfs_lookup_block_group(fs_info, start);
6243         ASSERT(bg);
6244         if (atomic_dec_and_test(&bg->reservations))
6245                 wake_up_atomic_t(&bg->reservations);
6246         btrfs_put_block_group(bg);
6247 }
6248
6249 static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
6250 {
6251         schedule();
6252         return 0;
6253 }
6254
6255 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6256 {
6257         struct btrfs_space_info *space_info = bg->space_info;
6258
6259         ASSERT(bg->ro);
6260
6261         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6262                 return;
6263
6264         /*
6265          * Our block group is read only but before we set it to read only,
6266          * some task might have had allocated an extent from it already, but it
6267          * has not yet created a respective ordered extent (and added it to a
6268          * root's list of ordered extents).
6269          * Therefore wait for any task currently allocating extents, since the
6270          * block group's reservations counter is incremented while a read lock
6271          * on the groups' semaphore is held and decremented after releasing
6272          * the read access on that semaphore and creating the ordered extent.
6273          */
6274         down_write(&space_info->groups_sem);
6275         up_write(&space_info->groups_sem);
6276
6277         wait_on_atomic_t(&bg->reservations,
6278                          btrfs_wait_bg_reservations_atomic_t,
6279                          TASK_UNINTERRUPTIBLE);
6280 }
6281
6282 /**
6283  * btrfs_update_reserved_bytes - update the block_group and space info counters
6284  * @cache:      The cache we are manipulating
6285  * @num_bytes:  The number of bytes in question
6286  * @reserve:    One of the reservation enums
6287  * @delalloc:   The blocks are allocated for the delalloc write
6288  *
6289  * This is called by the allocator when it reserves space, or by somebody who is
6290  * freeing space that was never actually used on disk.  For example if you
6291  * reserve some space for a new leaf in transaction A and before transaction A
6292  * commits you free that leaf, you call this with reserve set to 0 in order to
6293  * clear the reservation.
6294  *
6295  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6296  * ENOSPC accounting.  For data we handle the reservation through clearing the
6297  * delalloc bits in the io_tree.  We have to do this since we could end up
6298  * allocating less disk space for the amount of data we have reserved in the
6299  * case of compression.
6300  *
6301  * If this is a reservation and the block group has become read only we cannot
6302  * make the reservation and return -EAGAIN, otherwise this function always
6303  * succeeds.
6304  */
6305 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6306                                        u64 num_bytes, int reserve, int delalloc)
6307 {
6308         struct btrfs_space_info *space_info = cache->space_info;
6309         int ret = 0;
6310
6311         spin_lock(&space_info->lock);
6312         spin_lock(&cache->lock);
6313         if (reserve != RESERVE_FREE) {
6314                 if (cache->ro) {
6315                         ret = -EAGAIN;
6316                 } else {
6317                         cache->reserved += num_bytes;
6318                         space_info->bytes_reserved += num_bytes;
6319                         if (reserve == RESERVE_ALLOC) {
6320                                 trace_btrfs_space_reservation(cache->fs_info,
6321                                                 "space_info", space_info->flags,
6322                                                 num_bytes, 0);
6323                                 space_info->bytes_may_use -= num_bytes;
6324                         }
6325
6326                         if (delalloc)
6327                                 cache->delalloc_bytes += num_bytes;
6328                 }
6329         } else {
6330                 if (cache->ro)
6331                         space_info->bytes_readonly += num_bytes;
6332                 cache->reserved -= num_bytes;
6333                 space_info->bytes_reserved -= num_bytes;
6334
6335                 if (delalloc)
6336                         cache->delalloc_bytes -= num_bytes;
6337         }
6338         spin_unlock(&cache->lock);
6339         spin_unlock(&space_info->lock);
6340         return ret;
6341 }
6342
6343 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6344                                 struct btrfs_root *root)
6345 {
6346         struct btrfs_fs_info *fs_info = root->fs_info;
6347         struct btrfs_caching_control *next;
6348         struct btrfs_caching_control *caching_ctl;
6349         struct btrfs_block_group_cache *cache;
6350
6351         down_write(&fs_info->commit_root_sem);
6352
6353         list_for_each_entry_safe(caching_ctl, next,
6354                                  &fs_info->caching_block_groups, list) {
6355                 cache = caching_ctl->block_group;
6356                 if (block_group_cache_done(cache)) {
6357                         cache->last_byte_to_unpin = (u64)-1;
6358                         list_del_init(&caching_ctl->list);
6359                         put_caching_control(caching_ctl);
6360                 } else {
6361                         cache->last_byte_to_unpin = caching_ctl->progress;
6362                 }
6363         }
6364
6365         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6366                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6367         else
6368                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6369
6370         up_write(&fs_info->commit_root_sem);
6371
6372         update_global_block_rsv(fs_info);
6373 }
6374
6375 /*
6376  * Returns the free cluster for the given space info and sets empty_cluster to
6377  * what it should be based on the mount options.
6378  */
6379 static struct btrfs_free_cluster *
6380 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6381                    u64 *empty_cluster)
6382 {
6383         struct btrfs_free_cluster *ret = NULL;
6384         bool ssd = btrfs_test_opt(root, SSD);
6385
6386         *empty_cluster = 0;
6387         if (btrfs_mixed_space_info(space_info))
6388                 return ret;
6389
6390         if (ssd)
6391                 *empty_cluster = SZ_2M;
6392         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6393                 ret = &root->fs_info->meta_alloc_cluster;
6394                 if (!ssd)
6395                         *empty_cluster = SZ_64K;
6396         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6397                 ret = &root->fs_info->data_alloc_cluster;
6398         }
6399
6400         return ret;
6401 }
6402
6403 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6404                               const bool return_free_space)
6405 {
6406         struct btrfs_fs_info *fs_info = root->fs_info;
6407         struct btrfs_block_group_cache *cache = NULL;
6408         struct btrfs_space_info *space_info;
6409         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6410         struct btrfs_free_cluster *cluster = NULL;
6411         u64 len;
6412         u64 total_unpinned = 0;
6413         u64 empty_cluster = 0;
6414         bool readonly;
6415
6416         while (start <= end) {
6417                 readonly = false;
6418                 if (!cache ||
6419                     start >= cache->key.objectid + cache->key.offset) {
6420                         if (cache)
6421                                 btrfs_put_block_group(cache);
6422                         total_unpinned = 0;
6423                         cache = btrfs_lookup_block_group(fs_info, start);
6424                         BUG_ON(!cache); /* Logic error */
6425
6426                         cluster = fetch_cluster_info(root,
6427                                                      cache->space_info,
6428                                                      &empty_cluster);
6429                         empty_cluster <<= 1;
6430                 }
6431
6432                 len = cache->key.objectid + cache->key.offset - start;
6433                 len = min(len, end + 1 - start);
6434
6435                 if (start < cache->last_byte_to_unpin) {
6436                         len = min(len, cache->last_byte_to_unpin - start);
6437                         if (return_free_space)
6438                                 btrfs_add_free_space(cache, start, len);
6439                 }
6440
6441                 start += len;
6442                 total_unpinned += len;
6443                 space_info = cache->space_info;
6444
6445                 /*
6446                  * If this space cluster has been marked as fragmented and we've
6447                  * unpinned enough in this block group to potentially allow a
6448                  * cluster to be created inside of it go ahead and clear the
6449                  * fragmented check.
6450                  */
6451                 if (cluster && cluster->fragmented &&
6452                     total_unpinned > empty_cluster) {
6453                         spin_lock(&cluster->lock);
6454                         cluster->fragmented = 0;
6455                         spin_unlock(&cluster->lock);
6456                 }
6457
6458                 spin_lock(&space_info->lock);
6459                 spin_lock(&cache->lock);
6460                 cache->pinned -= len;
6461                 space_info->bytes_pinned -= len;
6462                 space_info->max_extent_size = 0;
6463                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6464                 if (cache->ro) {
6465                         space_info->bytes_readonly += len;
6466                         readonly = true;
6467                 }
6468                 spin_unlock(&cache->lock);
6469                 if (!readonly && global_rsv->space_info == space_info) {
6470                         spin_lock(&global_rsv->lock);
6471                         if (!global_rsv->full) {
6472                                 len = min(len, global_rsv->size -
6473                                           global_rsv->reserved);
6474                                 global_rsv->reserved += len;
6475                                 space_info->bytes_may_use += len;
6476                                 if (global_rsv->reserved >= global_rsv->size)
6477                                         global_rsv->full = 1;
6478                         }
6479                         spin_unlock(&global_rsv->lock);
6480                 }
6481                 spin_unlock(&space_info->lock);
6482         }
6483
6484         if (cache)
6485                 btrfs_put_block_group(cache);
6486         return 0;
6487 }
6488
6489 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6490                                struct btrfs_root *root)
6491 {
6492         struct btrfs_fs_info *fs_info = root->fs_info;
6493         struct btrfs_block_group_cache *block_group, *tmp;
6494         struct list_head *deleted_bgs;
6495         struct extent_io_tree *unpin;
6496         u64 start;
6497         u64 end;
6498         int ret;
6499
6500         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6501                 unpin = &fs_info->freed_extents[1];
6502         else
6503                 unpin = &fs_info->freed_extents[0];
6504
6505         while (!trans->aborted) {
6506                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6507                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6508                                             EXTENT_DIRTY, NULL);
6509                 if (ret) {
6510                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6511                         break;
6512                 }
6513
6514                 if (btrfs_test_opt(root, DISCARD))
6515                         ret = btrfs_discard_extent(root, start,
6516                                                    end + 1 - start, NULL);
6517
6518                 clear_extent_dirty(unpin, start, end);
6519                 unpin_extent_range(root, start, end, true);
6520                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6521                 cond_resched();
6522         }
6523
6524         /*
6525          * Transaction is finished.  We don't need the lock anymore.  We
6526          * do need to clean up the block groups in case of a transaction
6527          * abort.
6528          */
6529         deleted_bgs = &trans->transaction->deleted_bgs;
6530         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6531                 u64 trimmed = 0;
6532
6533                 ret = -EROFS;
6534                 if (!trans->aborted)
6535                         ret = btrfs_discard_extent(root,
6536                                                    block_group->key.objectid,
6537                                                    block_group->key.offset,
6538                                                    &trimmed);
6539
6540                 list_del_init(&block_group->bg_list);
6541                 btrfs_put_block_group_trimming(block_group);
6542                 btrfs_put_block_group(block_group);
6543
6544                 if (ret) {
6545                         const char *errstr = btrfs_decode_error(ret);
6546                         btrfs_warn(fs_info,
6547                                    "Discard failed while removing blockgroup: errno=%d %s\n",
6548                                    ret, errstr);
6549                 }
6550         }
6551
6552         return 0;
6553 }
6554
6555 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6556                              u64 owner, u64 root_objectid)
6557 {
6558         struct btrfs_space_info *space_info;
6559         u64 flags;
6560
6561         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6562                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6563                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6564                 else
6565                         flags = BTRFS_BLOCK_GROUP_METADATA;
6566         } else {
6567                 flags = BTRFS_BLOCK_GROUP_DATA;
6568         }
6569
6570         space_info = __find_space_info(fs_info, flags);
6571         BUG_ON(!space_info); /* Logic bug */
6572         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6573 }
6574
6575
6576 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6577                                 struct btrfs_root *root,
6578                                 struct btrfs_delayed_ref_node *node, u64 parent,
6579                                 u64 root_objectid, u64 owner_objectid,
6580                                 u64 owner_offset, int refs_to_drop,
6581                                 struct btrfs_delayed_extent_op *extent_op)
6582 {
6583         struct btrfs_key key;
6584         struct btrfs_path *path;
6585         struct btrfs_fs_info *info = root->fs_info;
6586         struct btrfs_root *extent_root = info->extent_root;
6587         struct extent_buffer *leaf;
6588         struct btrfs_extent_item *ei;
6589         struct btrfs_extent_inline_ref *iref;
6590         int ret;
6591         int is_data;
6592         int extent_slot = 0;
6593         int found_extent = 0;
6594         int num_to_del = 1;
6595         u32 item_size;
6596         u64 refs;
6597         u64 bytenr = node->bytenr;
6598         u64 num_bytes = node->num_bytes;
6599         int last_ref = 0;
6600         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6601                                                  SKINNY_METADATA);
6602
6603         path = btrfs_alloc_path();
6604         if (!path)
6605                 return -ENOMEM;
6606
6607         path->reada = READA_FORWARD;
6608         path->leave_spinning = 1;
6609
6610         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6611         BUG_ON(!is_data && refs_to_drop != 1);
6612
6613         if (is_data)
6614                 skinny_metadata = 0;
6615
6616         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6617                                     bytenr, num_bytes, parent,
6618                                     root_objectid, owner_objectid,
6619                                     owner_offset);
6620         if (ret == 0) {
6621                 extent_slot = path->slots[0];
6622                 while (extent_slot >= 0) {
6623                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6624                                               extent_slot);
6625                         if (key.objectid != bytenr)
6626                                 break;
6627                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6628                             key.offset == num_bytes) {
6629                                 found_extent = 1;
6630                                 break;
6631                         }
6632                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6633                             key.offset == owner_objectid) {
6634                                 found_extent = 1;
6635                                 break;
6636                         }
6637                         if (path->slots[0] - extent_slot > 5)
6638                                 break;
6639                         extent_slot--;
6640                 }
6641 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6642                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6643                 if (found_extent && item_size < sizeof(*ei))
6644                         found_extent = 0;
6645 #endif
6646                 if (!found_extent) {
6647                         BUG_ON(iref);
6648                         ret = remove_extent_backref(trans, extent_root, path,
6649                                                     NULL, refs_to_drop,
6650                                                     is_data, &last_ref);
6651                         if (ret) {
6652                                 btrfs_abort_transaction(trans, extent_root, ret);
6653                                 goto out;
6654                         }
6655                         btrfs_release_path(path);
6656                         path->leave_spinning = 1;
6657
6658                         key.objectid = bytenr;
6659                         key.type = BTRFS_EXTENT_ITEM_KEY;
6660                         key.offset = num_bytes;
6661
6662                         if (!is_data && skinny_metadata) {
6663                                 key.type = BTRFS_METADATA_ITEM_KEY;
6664                                 key.offset = owner_objectid;
6665                         }
6666
6667                         ret = btrfs_search_slot(trans, extent_root,
6668                                                 &key, path, -1, 1);
6669                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6670                                 /*
6671                                  * Couldn't find our skinny metadata item,
6672                                  * see if we have ye olde extent item.
6673                                  */
6674                                 path->slots[0]--;
6675                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6676                                                       path->slots[0]);
6677                                 if (key.objectid == bytenr &&
6678                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6679                                     key.offset == num_bytes)
6680                                         ret = 0;
6681                         }
6682
6683                         if (ret > 0 && skinny_metadata) {
6684                                 skinny_metadata = false;
6685                                 key.objectid = bytenr;
6686                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6687                                 key.offset = num_bytes;
6688                                 btrfs_release_path(path);
6689                                 ret = btrfs_search_slot(trans, extent_root,
6690                                                         &key, path, -1, 1);
6691                         }
6692
6693                         if (ret) {
6694                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6695                                         ret, bytenr);
6696                                 if (ret > 0)
6697                                         btrfs_print_leaf(extent_root,
6698                                                          path->nodes[0]);
6699                         }
6700                         if (ret < 0) {
6701                                 btrfs_abort_transaction(trans, extent_root, ret);
6702                                 goto out;
6703                         }
6704                         extent_slot = path->slots[0];
6705                 }
6706         } else if (WARN_ON(ret == -ENOENT)) {
6707                 btrfs_print_leaf(extent_root, path->nodes[0]);
6708                 btrfs_err(info,
6709                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6710                         bytenr, parent, root_objectid, owner_objectid,
6711                         owner_offset);
6712                 btrfs_abort_transaction(trans, extent_root, ret);
6713                 goto out;
6714         } else {
6715                 btrfs_abort_transaction(trans, extent_root, ret);
6716                 goto out;
6717         }
6718
6719         leaf = path->nodes[0];
6720         item_size = btrfs_item_size_nr(leaf, extent_slot);
6721 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6722         if (item_size < sizeof(*ei)) {
6723                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6724                 ret = convert_extent_item_v0(trans, extent_root, path,
6725                                              owner_objectid, 0);
6726                 if (ret < 0) {
6727                         btrfs_abort_transaction(trans, extent_root, ret);
6728                         goto out;
6729                 }
6730
6731                 btrfs_release_path(path);
6732                 path->leave_spinning = 1;
6733
6734                 key.objectid = bytenr;
6735                 key.type = BTRFS_EXTENT_ITEM_KEY;
6736                 key.offset = num_bytes;
6737
6738                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6739                                         -1, 1);
6740                 if (ret) {
6741                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6742                                 ret, bytenr);
6743                         btrfs_print_leaf(extent_root, path->nodes[0]);
6744                 }
6745                 if (ret < 0) {
6746                         btrfs_abort_transaction(trans, extent_root, ret);
6747                         goto out;
6748                 }
6749
6750                 extent_slot = path->slots[0];
6751                 leaf = path->nodes[0];
6752                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6753         }
6754 #endif
6755         BUG_ON(item_size < sizeof(*ei));
6756         ei = btrfs_item_ptr(leaf, extent_slot,
6757                             struct btrfs_extent_item);
6758         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6759             key.type == BTRFS_EXTENT_ITEM_KEY) {
6760                 struct btrfs_tree_block_info *bi;
6761                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6762                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6763                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6764         }
6765
6766         refs = btrfs_extent_refs(leaf, ei);
6767         if (refs < refs_to_drop) {
6768                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6769                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6770                 ret = -EINVAL;
6771                 btrfs_abort_transaction(trans, extent_root, ret);
6772                 goto out;
6773         }
6774         refs -= refs_to_drop;
6775
6776         if (refs > 0) {
6777                 if (extent_op)
6778                         __run_delayed_extent_op(extent_op, leaf, ei);
6779                 /*
6780                  * In the case of inline back ref, reference count will
6781                  * be updated by remove_extent_backref
6782                  */
6783                 if (iref) {
6784                         BUG_ON(!found_extent);
6785                 } else {
6786                         btrfs_set_extent_refs(leaf, ei, refs);
6787                         btrfs_mark_buffer_dirty(leaf);
6788                 }
6789                 if (found_extent) {
6790                         ret = remove_extent_backref(trans, extent_root, path,
6791                                                     iref, refs_to_drop,
6792                                                     is_data, &last_ref);
6793                         if (ret) {
6794                                 btrfs_abort_transaction(trans, extent_root, ret);
6795                                 goto out;
6796                         }
6797                 }
6798                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6799                                  root_objectid);
6800         } else {
6801                 if (found_extent) {
6802                         BUG_ON(is_data && refs_to_drop !=
6803                                extent_data_ref_count(path, iref));
6804                         if (iref) {
6805                                 BUG_ON(path->slots[0] != extent_slot);
6806                         } else {
6807                                 BUG_ON(path->slots[0] != extent_slot + 1);
6808                                 path->slots[0] = extent_slot;
6809                                 num_to_del = 2;
6810                         }
6811                 }
6812
6813                 last_ref = 1;
6814                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6815                                       num_to_del);
6816                 if (ret) {
6817                         btrfs_abort_transaction(trans, extent_root, ret);
6818                         goto out;
6819                 }
6820                 btrfs_release_path(path);
6821
6822                 if (is_data) {
6823                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6824                         if (ret) {
6825                                 btrfs_abort_transaction(trans, extent_root, ret);
6826                                 goto out;
6827                         }
6828                 }
6829
6830                 ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
6831                                              num_bytes);
6832                 if (ret) {
6833                         btrfs_abort_transaction(trans, extent_root, ret);
6834                         goto out;
6835                 }
6836
6837                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6838                 if (ret) {
6839                         btrfs_abort_transaction(trans, extent_root, ret);
6840                         goto out;
6841                 }
6842         }
6843         btrfs_release_path(path);
6844
6845 out:
6846         btrfs_free_path(path);
6847         return ret;
6848 }
6849
6850 /*
6851  * when we free an block, it is possible (and likely) that we free the last
6852  * delayed ref for that extent as well.  This searches the delayed ref tree for
6853  * a given extent, and if there are no other delayed refs to be processed, it
6854  * removes it from the tree.
6855  */
6856 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6857                                       struct btrfs_root *root, u64 bytenr)
6858 {
6859         struct btrfs_delayed_ref_head *head;
6860         struct btrfs_delayed_ref_root *delayed_refs;
6861         int ret = 0;
6862
6863         delayed_refs = &trans->transaction->delayed_refs;
6864         spin_lock(&delayed_refs->lock);
6865         head = btrfs_find_delayed_ref_head(trans, bytenr);
6866         if (!head)
6867                 goto out_delayed_unlock;
6868
6869         spin_lock(&head->lock);
6870         if (!list_empty(&head->ref_list))
6871                 goto out;
6872
6873         if (head->extent_op) {
6874                 if (!head->must_insert_reserved)
6875                         goto out;
6876                 btrfs_free_delayed_extent_op(head->extent_op);
6877                 head->extent_op = NULL;
6878         }
6879
6880         /*
6881          * waiting for the lock here would deadlock.  If someone else has it
6882          * locked they are already in the process of dropping it anyway
6883          */
6884         if (!mutex_trylock(&head->mutex))
6885                 goto out;
6886
6887         /*
6888          * at this point we have a head with no other entries.  Go
6889          * ahead and process it.
6890          */
6891         head->node.in_tree = 0;
6892         rb_erase(&head->href_node, &delayed_refs->href_root);
6893
6894         atomic_dec(&delayed_refs->num_entries);
6895
6896         /*
6897          * we don't take a ref on the node because we're removing it from the
6898          * tree, so we just steal the ref the tree was holding.
6899          */
6900         delayed_refs->num_heads--;
6901         if (head->processing == 0)
6902                 delayed_refs->num_heads_ready--;
6903         head->processing = 0;
6904         spin_unlock(&head->lock);
6905         spin_unlock(&delayed_refs->lock);
6906
6907         BUG_ON(head->extent_op);
6908         if (head->must_insert_reserved)
6909                 ret = 1;
6910
6911         mutex_unlock(&head->mutex);
6912         btrfs_put_delayed_ref(&head->node);
6913         return ret;
6914 out:
6915         spin_unlock(&head->lock);
6916
6917 out_delayed_unlock:
6918         spin_unlock(&delayed_refs->lock);
6919         return 0;
6920 }
6921
6922 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6923                            struct btrfs_root *root,
6924                            struct extent_buffer *buf,
6925                            u64 parent, int last_ref)
6926 {
6927         int pin = 1;
6928         int ret;
6929
6930         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6931                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6932                                         buf->start, buf->len,
6933                                         parent, root->root_key.objectid,
6934                                         btrfs_header_level(buf),
6935                                         BTRFS_DROP_DELAYED_REF, NULL);
6936                 BUG_ON(ret); /* -ENOMEM */
6937         }
6938
6939         if (!last_ref)
6940                 return;
6941
6942         if (btrfs_header_generation(buf) == trans->transid) {
6943                 struct btrfs_block_group_cache *cache;
6944
6945                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6946                         ret = check_ref_cleanup(trans, root, buf->start);
6947                         if (!ret)
6948                                 goto out;
6949                 }
6950
6951                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6952
6953                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6954                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6955                         btrfs_put_block_group(cache);
6956                         goto out;
6957                 }
6958
6959                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6960
6961                 btrfs_add_free_space(cache, buf->start, buf->len);
6962                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6963                 btrfs_put_block_group(cache);
6964                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6965                 pin = 0;
6966         }
6967 out:
6968         if (pin)
6969                 add_pinned_bytes(root->fs_info, buf->len,
6970                                  btrfs_header_level(buf),
6971                                  root->root_key.objectid);
6972
6973         /*
6974          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6975          * anymore.
6976          */
6977         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6978 }
6979
6980 /* Can return -ENOMEM */
6981 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6982                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6983                       u64 owner, u64 offset)
6984 {
6985         int ret;
6986         struct btrfs_fs_info *fs_info = root->fs_info;
6987
6988         if (btrfs_test_is_dummy_root(root))
6989                 return 0;
6990
6991         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6992
6993         /*
6994          * tree log blocks never actually go into the extent allocation
6995          * tree, just update pinning info and exit early.
6996          */
6997         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6998                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6999                 /* unlocks the pinned mutex */
7000                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
7001                 ret = 0;
7002         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7003                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7004                                         num_bytes,
7005                                         parent, root_objectid, (int)owner,
7006                                         BTRFS_DROP_DELAYED_REF, NULL);
7007         } else {
7008                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7009                                                 num_bytes,
7010                                                 parent, root_objectid, owner,
7011                                                 offset, 0,
7012                                                 BTRFS_DROP_DELAYED_REF, NULL);
7013         }
7014         return ret;
7015 }
7016
7017 /*
7018  * when we wait for progress in the block group caching, its because
7019  * our allocation attempt failed at least once.  So, we must sleep
7020  * and let some progress happen before we try again.
7021  *
7022  * This function will sleep at least once waiting for new free space to
7023  * show up, and then it will check the block group free space numbers
7024  * for our min num_bytes.  Another option is to have it go ahead
7025  * and look in the rbtree for a free extent of a given size, but this
7026  * is a good start.
7027  *
7028  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7029  * any of the information in this block group.
7030  */
7031 static noinline void
7032 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7033                                 u64 num_bytes)
7034 {
7035         struct btrfs_caching_control *caching_ctl;
7036
7037         caching_ctl = get_caching_control(cache);
7038         if (!caching_ctl)
7039                 return;
7040
7041         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7042                    (cache->free_space_ctl->free_space >= num_bytes));
7043
7044         put_caching_control(caching_ctl);
7045 }
7046
7047 static noinline int
7048 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7049 {
7050         struct btrfs_caching_control *caching_ctl;
7051         int ret = 0;
7052
7053         caching_ctl = get_caching_control(cache);
7054         if (!caching_ctl)
7055                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7056
7057         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7058         if (cache->cached == BTRFS_CACHE_ERROR)
7059                 ret = -EIO;
7060         put_caching_control(caching_ctl);
7061         return ret;
7062 }
7063
7064 int __get_raid_index(u64 flags)
7065 {
7066         if (flags & BTRFS_BLOCK_GROUP_RAID10)
7067                 return BTRFS_RAID_RAID10;
7068         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
7069                 return BTRFS_RAID_RAID1;
7070         else if (flags & BTRFS_BLOCK_GROUP_DUP)
7071                 return BTRFS_RAID_DUP;
7072         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
7073                 return BTRFS_RAID_RAID0;
7074         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
7075                 return BTRFS_RAID_RAID5;
7076         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
7077                 return BTRFS_RAID_RAID6;
7078
7079         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
7080 }
7081
7082 int get_block_group_index(struct btrfs_block_group_cache *cache)
7083 {
7084         return __get_raid_index(cache->flags);
7085 }
7086
7087 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7088         [BTRFS_RAID_RAID10]     = "raid10",
7089         [BTRFS_RAID_RAID1]      = "raid1",
7090         [BTRFS_RAID_DUP]        = "dup",
7091         [BTRFS_RAID_RAID0]      = "raid0",
7092         [BTRFS_RAID_SINGLE]     = "single",
7093         [BTRFS_RAID_RAID5]      = "raid5",
7094         [BTRFS_RAID_RAID6]      = "raid6",
7095 };
7096
7097 static const char *get_raid_name(enum btrfs_raid_types type)
7098 {
7099         if (type >= BTRFS_NR_RAID_TYPES)
7100                 return NULL;
7101
7102         return btrfs_raid_type_names[type];
7103 }
7104
7105 enum btrfs_loop_type {
7106         LOOP_CACHING_NOWAIT = 0,
7107         LOOP_CACHING_WAIT = 1,
7108         LOOP_ALLOC_CHUNK = 2,
7109         LOOP_NO_EMPTY_SIZE = 3,
7110 };
7111
7112 static inline void
7113 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7114                        int delalloc)
7115 {
7116         if (delalloc)
7117                 down_read(&cache->data_rwsem);
7118 }
7119
7120 static inline void
7121 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7122                        int delalloc)
7123 {
7124         btrfs_get_block_group(cache);
7125         if (delalloc)
7126                 down_read(&cache->data_rwsem);
7127 }
7128
7129 static struct btrfs_block_group_cache *
7130 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7131                    struct btrfs_free_cluster *cluster,
7132                    int delalloc)
7133 {
7134         struct btrfs_block_group_cache *used_bg = NULL;
7135
7136         spin_lock(&cluster->refill_lock);
7137         while (1) {
7138                 used_bg = cluster->block_group;
7139                 if (!used_bg)
7140                         return NULL;
7141
7142                 if (used_bg == block_group)
7143                         return used_bg;
7144
7145                 btrfs_get_block_group(used_bg);
7146
7147                 if (!delalloc)
7148                         return used_bg;
7149
7150                 if (down_read_trylock(&used_bg->data_rwsem))
7151                         return used_bg;
7152
7153                 spin_unlock(&cluster->refill_lock);
7154
7155                 down_read(&used_bg->data_rwsem);
7156
7157                 spin_lock(&cluster->refill_lock);
7158                 if (used_bg == cluster->block_group)
7159                         return used_bg;
7160
7161                 up_read(&used_bg->data_rwsem);
7162                 btrfs_put_block_group(used_bg);
7163         }
7164 }
7165
7166 static inline void
7167 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7168                          int delalloc)
7169 {
7170         if (delalloc)
7171                 up_read(&cache->data_rwsem);
7172         btrfs_put_block_group(cache);
7173 }
7174
7175 /*
7176  * walks the btree of allocated extents and find a hole of a given size.
7177  * The key ins is changed to record the hole:
7178  * ins->objectid == start position
7179  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7180  * ins->offset == the size of the hole.
7181  * Any available blocks before search_start are skipped.
7182  *
7183  * If there is no suitable free space, we will record the max size of
7184  * the free space extent currently.
7185  */
7186 static noinline int find_free_extent(struct btrfs_root *orig_root,
7187                                      u64 num_bytes, u64 empty_size,
7188                                      u64 hint_byte, struct btrfs_key *ins,
7189                                      u64 flags, int delalloc)
7190 {
7191         int ret = 0;
7192         struct btrfs_root *root = orig_root->fs_info->extent_root;
7193         struct btrfs_free_cluster *last_ptr = NULL;
7194         struct btrfs_block_group_cache *block_group = NULL;
7195         u64 search_start = 0;
7196         u64 max_extent_size = 0;
7197         u64 empty_cluster = 0;
7198         struct btrfs_space_info *space_info;
7199         int loop = 0;
7200         int index = __get_raid_index(flags);
7201         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7202                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7203         bool failed_cluster_refill = false;
7204         bool failed_alloc = false;
7205         bool use_cluster = true;
7206         bool have_caching_bg = false;
7207         bool orig_have_caching_bg = false;
7208         bool full_search = false;
7209
7210         WARN_ON(num_bytes < root->sectorsize);
7211         ins->type = BTRFS_EXTENT_ITEM_KEY;
7212         ins->objectid = 0;
7213         ins->offset = 0;
7214
7215         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7216
7217         space_info = __find_space_info(root->fs_info, flags);
7218         if (!space_info) {
7219                 btrfs_err(root->fs_info, "No space info for %llu", flags);
7220                 return -ENOSPC;
7221         }
7222
7223         /*
7224          * If our free space is heavily fragmented we may not be able to make
7225          * big contiguous allocations, so instead of doing the expensive search
7226          * for free space, simply return ENOSPC with our max_extent_size so we
7227          * can go ahead and search for a more manageable chunk.
7228          *
7229          * If our max_extent_size is large enough for our allocation simply
7230          * disable clustering since we will likely not be able to find enough
7231          * space to create a cluster and induce latency trying.
7232          */
7233         if (unlikely(space_info->max_extent_size)) {
7234                 spin_lock(&space_info->lock);
7235                 if (space_info->max_extent_size &&
7236                     num_bytes > space_info->max_extent_size) {
7237                         ins->offset = space_info->max_extent_size;
7238                         spin_unlock(&space_info->lock);
7239                         return -ENOSPC;
7240                 } else if (space_info->max_extent_size) {
7241                         use_cluster = false;
7242                 }
7243                 spin_unlock(&space_info->lock);
7244         }
7245
7246         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7247         if (last_ptr) {
7248                 spin_lock(&last_ptr->lock);
7249                 if (last_ptr->block_group)
7250                         hint_byte = last_ptr->window_start;
7251                 if (last_ptr->fragmented) {
7252                         /*
7253                          * We still set window_start so we can keep track of the
7254                          * last place we found an allocation to try and save
7255                          * some time.
7256                          */
7257                         hint_byte = last_ptr->window_start;
7258                         use_cluster = false;
7259                 }
7260                 spin_unlock(&last_ptr->lock);
7261         }
7262
7263         search_start = max(search_start, first_logical_byte(root, 0));
7264         search_start = max(search_start, hint_byte);
7265         if (search_start == hint_byte) {
7266                 block_group = btrfs_lookup_block_group(root->fs_info,
7267                                                        search_start);
7268                 /*
7269                  * we don't want to use the block group if it doesn't match our
7270                  * allocation bits, or if its not cached.
7271                  *
7272                  * However if we are re-searching with an ideal block group
7273                  * picked out then we don't care that the block group is cached.
7274                  */
7275                 if (block_group && block_group_bits(block_group, flags) &&
7276                     block_group->cached != BTRFS_CACHE_NO) {
7277                         down_read(&space_info->groups_sem);
7278                         if (list_empty(&block_group->list) ||
7279                             block_group->ro) {
7280                                 /*
7281                                  * someone is removing this block group,
7282                                  * we can't jump into the have_block_group
7283                                  * target because our list pointers are not
7284                                  * valid
7285                                  */
7286                                 btrfs_put_block_group(block_group);
7287                                 up_read(&space_info->groups_sem);
7288                         } else {
7289                                 index = get_block_group_index(block_group);
7290                                 btrfs_lock_block_group(block_group, delalloc);
7291                                 goto have_block_group;
7292                         }
7293                 } else if (block_group) {
7294                         btrfs_put_block_group(block_group);
7295                 }
7296         }
7297 search:
7298         have_caching_bg = false;
7299         if (index == 0 || index == __get_raid_index(flags))
7300                 full_search = true;
7301         down_read(&space_info->groups_sem);
7302         list_for_each_entry(block_group, &space_info->block_groups[index],
7303                             list) {
7304                 u64 offset;
7305                 int cached;
7306
7307                 btrfs_grab_block_group(block_group, delalloc);
7308                 search_start = block_group->key.objectid;
7309
7310                 /*
7311                  * this can happen if we end up cycling through all the
7312                  * raid types, but we want to make sure we only allocate
7313                  * for the proper type.
7314                  */
7315                 if (!block_group_bits(block_group, flags)) {
7316                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
7317                                 BTRFS_BLOCK_GROUP_RAID1 |
7318                                 BTRFS_BLOCK_GROUP_RAID5 |
7319                                 BTRFS_BLOCK_GROUP_RAID6 |
7320                                 BTRFS_BLOCK_GROUP_RAID10;
7321
7322                         /*
7323                          * if they asked for extra copies and this block group
7324                          * doesn't provide them, bail.  This does allow us to
7325                          * fill raid0 from raid1.
7326                          */
7327                         if ((flags & extra) && !(block_group->flags & extra))
7328                                 goto loop;
7329                 }
7330
7331 have_block_group:
7332                 cached = block_group_cache_done(block_group);
7333                 if (unlikely(!cached)) {
7334                         have_caching_bg = true;
7335                         ret = cache_block_group(block_group, 0);
7336                         BUG_ON(ret < 0);
7337                         ret = 0;
7338                 }
7339
7340                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7341                         goto loop;
7342                 if (unlikely(block_group->ro))
7343                         goto loop;
7344
7345                 /*
7346                  * Ok we want to try and use the cluster allocator, so
7347                  * lets look there
7348                  */
7349                 if (last_ptr && use_cluster) {
7350                         struct btrfs_block_group_cache *used_block_group;
7351                         unsigned long aligned_cluster;
7352                         /*
7353                          * the refill lock keeps out other
7354                          * people trying to start a new cluster
7355                          */
7356                         used_block_group = btrfs_lock_cluster(block_group,
7357                                                               last_ptr,
7358                                                               delalloc);
7359                         if (!used_block_group)
7360                                 goto refill_cluster;
7361
7362                         if (used_block_group != block_group &&
7363                             (used_block_group->ro ||
7364                              !block_group_bits(used_block_group, flags)))
7365                                 goto release_cluster;
7366
7367                         offset = btrfs_alloc_from_cluster(used_block_group,
7368                                                 last_ptr,
7369                                                 num_bytes,
7370                                                 used_block_group->key.objectid,
7371                                                 &max_extent_size);
7372                         if (offset) {
7373                                 /* we have a block, we're done */
7374                                 spin_unlock(&last_ptr->refill_lock);
7375                                 trace_btrfs_reserve_extent_cluster(root,
7376                                                 used_block_group,
7377                                                 search_start, num_bytes);
7378                                 if (used_block_group != block_group) {
7379                                         btrfs_release_block_group(block_group,
7380                                                                   delalloc);
7381                                         block_group = used_block_group;
7382                                 }
7383                                 goto checks;
7384                         }
7385
7386                         WARN_ON(last_ptr->block_group != used_block_group);
7387 release_cluster:
7388                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7389                          * set up a new clusters, so lets just skip it
7390                          * and let the allocator find whatever block
7391                          * it can find.  If we reach this point, we
7392                          * will have tried the cluster allocator
7393                          * plenty of times and not have found
7394                          * anything, so we are likely way too
7395                          * fragmented for the clustering stuff to find
7396                          * anything.
7397                          *
7398                          * However, if the cluster is taken from the
7399                          * current block group, release the cluster
7400                          * first, so that we stand a better chance of
7401                          * succeeding in the unclustered
7402                          * allocation.  */
7403                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7404                             used_block_group != block_group) {
7405                                 spin_unlock(&last_ptr->refill_lock);
7406                                 btrfs_release_block_group(used_block_group,
7407                                                           delalloc);
7408                                 goto unclustered_alloc;
7409                         }
7410
7411                         /*
7412                          * this cluster didn't work out, free it and
7413                          * start over
7414                          */
7415                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7416
7417                         if (used_block_group != block_group)
7418                                 btrfs_release_block_group(used_block_group,
7419                                                           delalloc);
7420 refill_cluster:
7421                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7422                                 spin_unlock(&last_ptr->refill_lock);
7423                                 goto unclustered_alloc;
7424                         }
7425
7426                         aligned_cluster = max_t(unsigned long,
7427                                                 empty_cluster + empty_size,
7428                                               block_group->full_stripe_len);
7429
7430                         /* allocate a cluster in this block group */
7431                         ret = btrfs_find_space_cluster(root, block_group,
7432                                                        last_ptr, search_start,
7433                                                        num_bytes,
7434                                                        aligned_cluster);
7435                         if (ret == 0) {
7436                                 /*
7437                                  * now pull our allocation out of this
7438                                  * cluster
7439                                  */
7440                                 offset = btrfs_alloc_from_cluster(block_group,
7441                                                         last_ptr,
7442                                                         num_bytes,
7443                                                         search_start,
7444                                                         &max_extent_size);
7445                                 if (offset) {
7446                                         /* we found one, proceed */
7447                                         spin_unlock(&last_ptr->refill_lock);
7448                                         trace_btrfs_reserve_extent_cluster(root,
7449                                                 block_group, search_start,
7450                                                 num_bytes);
7451                                         goto checks;
7452                                 }
7453                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7454                                    && !failed_cluster_refill) {
7455                                 spin_unlock(&last_ptr->refill_lock);
7456
7457                                 failed_cluster_refill = true;
7458                                 wait_block_group_cache_progress(block_group,
7459                                        num_bytes + empty_cluster + empty_size);
7460                                 goto have_block_group;
7461                         }
7462
7463                         /*
7464                          * at this point we either didn't find a cluster
7465                          * or we weren't able to allocate a block from our
7466                          * cluster.  Free the cluster we've been trying
7467                          * to use, and go to the next block group
7468                          */
7469                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7470                         spin_unlock(&last_ptr->refill_lock);
7471                         goto loop;
7472                 }
7473
7474 unclustered_alloc:
7475                 /*
7476                  * We are doing an unclustered alloc, set the fragmented flag so
7477                  * we don't bother trying to setup a cluster again until we get
7478                  * more space.
7479                  */
7480                 if (unlikely(last_ptr)) {
7481                         spin_lock(&last_ptr->lock);
7482                         last_ptr->fragmented = 1;
7483                         spin_unlock(&last_ptr->lock);
7484                 }
7485                 spin_lock(&block_group->free_space_ctl->tree_lock);
7486                 if (cached &&
7487                     block_group->free_space_ctl->free_space <
7488                     num_bytes + empty_cluster + empty_size) {
7489                         if (block_group->free_space_ctl->free_space >
7490                             max_extent_size)
7491                                 max_extent_size =
7492                                         block_group->free_space_ctl->free_space;
7493                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7494                         goto loop;
7495                 }
7496                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7497
7498                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7499                                                     num_bytes, empty_size,
7500                                                     &max_extent_size);
7501                 /*
7502                  * If we didn't find a chunk, and we haven't failed on this
7503                  * block group before, and this block group is in the middle of
7504                  * caching and we are ok with waiting, then go ahead and wait
7505                  * for progress to be made, and set failed_alloc to true.
7506                  *
7507                  * If failed_alloc is true then we've already waited on this
7508                  * block group once and should move on to the next block group.
7509                  */
7510                 if (!offset && !failed_alloc && !cached &&
7511                     loop > LOOP_CACHING_NOWAIT) {
7512                         wait_block_group_cache_progress(block_group,
7513                                                 num_bytes + empty_size);
7514                         failed_alloc = true;
7515                         goto have_block_group;
7516                 } else if (!offset) {
7517                         goto loop;
7518                 }
7519 checks:
7520                 search_start = ALIGN(offset, root->stripesize);
7521
7522                 /* move on to the next group */
7523                 if (search_start + num_bytes >
7524                     block_group->key.objectid + block_group->key.offset) {
7525                         btrfs_add_free_space(block_group, offset, num_bytes);
7526                         goto loop;
7527                 }
7528
7529                 if (offset < search_start)
7530                         btrfs_add_free_space(block_group, offset,
7531                                              search_start - offset);
7532                 BUG_ON(offset > search_start);
7533
7534                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7535                                                   alloc_type, delalloc);
7536                 if (ret == -EAGAIN) {
7537                         btrfs_add_free_space(block_group, offset, num_bytes);
7538                         goto loop;
7539                 }
7540                 btrfs_inc_block_group_reservations(block_group);
7541
7542                 /* we are all good, lets return */
7543                 ins->objectid = search_start;
7544                 ins->offset = num_bytes;
7545
7546                 trace_btrfs_reserve_extent(orig_root, block_group,
7547                                            search_start, num_bytes);
7548                 btrfs_release_block_group(block_group, delalloc);
7549                 break;
7550 loop:
7551                 failed_cluster_refill = false;
7552                 failed_alloc = false;
7553                 BUG_ON(index != get_block_group_index(block_group));
7554                 btrfs_release_block_group(block_group, delalloc);
7555         }
7556         up_read(&space_info->groups_sem);
7557
7558         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7559                 && !orig_have_caching_bg)
7560                 orig_have_caching_bg = true;
7561
7562         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7563                 goto search;
7564
7565         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7566                 goto search;
7567
7568         /*
7569          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7570          *                      caching kthreads as we move along
7571          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7572          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7573          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7574          *                      again
7575          */
7576         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7577                 index = 0;
7578                 if (loop == LOOP_CACHING_NOWAIT) {
7579                         /*
7580                          * We want to skip the LOOP_CACHING_WAIT step if we
7581                          * don't have any uncached bgs and we've already done a
7582                          * full search through.
7583                          */
7584                         if (orig_have_caching_bg || !full_search)
7585                                 loop = LOOP_CACHING_WAIT;
7586                         else
7587                                 loop = LOOP_ALLOC_CHUNK;
7588                 } else {
7589                         loop++;
7590                 }
7591
7592                 if (loop == LOOP_ALLOC_CHUNK) {
7593                         struct btrfs_trans_handle *trans;
7594                         int exist = 0;
7595
7596                         trans = current->journal_info;
7597                         if (trans)
7598                                 exist = 1;
7599                         else
7600                                 trans = btrfs_join_transaction(root);
7601
7602                         if (IS_ERR(trans)) {
7603                                 ret = PTR_ERR(trans);
7604                                 goto out;
7605                         }
7606
7607                         ret = do_chunk_alloc(trans, root, flags,
7608                                              CHUNK_ALLOC_FORCE);
7609
7610                         /*
7611                          * If we can't allocate a new chunk we've already looped
7612                          * through at least once, move on to the NO_EMPTY_SIZE
7613                          * case.
7614                          */
7615                         if (ret == -ENOSPC)
7616                                 loop = LOOP_NO_EMPTY_SIZE;
7617
7618                         /*
7619                          * Do not bail out on ENOSPC since we
7620                          * can do more things.
7621                          */
7622                         if (ret < 0 && ret != -ENOSPC)
7623                                 btrfs_abort_transaction(trans,
7624                                                         root, ret);
7625                         else
7626                                 ret = 0;
7627                         if (!exist)
7628                                 btrfs_end_transaction(trans, root);
7629                         if (ret)
7630                                 goto out;
7631                 }
7632
7633                 if (loop == LOOP_NO_EMPTY_SIZE) {
7634                         /*
7635                          * Don't loop again if we already have no empty_size and
7636                          * no empty_cluster.
7637                          */
7638                         if (empty_size == 0 &&
7639                             empty_cluster == 0) {
7640                                 ret = -ENOSPC;
7641                                 goto out;
7642                         }
7643                         empty_size = 0;
7644                         empty_cluster = 0;
7645                 }
7646
7647                 goto search;
7648         } else if (!ins->objectid) {
7649                 ret = -ENOSPC;
7650         } else if (ins->objectid) {
7651                 if (!use_cluster && last_ptr) {
7652                         spin_lock(&last_ptr->lock);
7653                         last_ptr->window_start = ins->objectid;
7654                         spin_unlock(&last_ptr->lock);
7655                 }
7656                 ret = 0;
7657         }
7658 out:
7659         if (ret == -ENOSPC) {
7660                 spin_lock(&space_info->lock);
7661                 space_info->max_extent_size = max_extent_size;
7662                 spin_unlock(&space_info->lock);
7663                 ins->offset = max_extent_size;
7664         }
7665         return ret;
7666 }
7667
7668 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7669                             int dump_block_groups)
7670 {
7671         struct btrfs_block_group_cache *cache;
7672         int index = 0;
7673
7674         spin_lock(&info->lock);
7675         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7676                info->flags,
7677                info->total_bytes - info->bytes_used - info->bytes_pinned -
7678                info->bytes_reserved - info->bytes_readonly,
7679                (info->full) ? "" : "not ");
7680         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7681                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7682                info->total_bytes, info->bytes_used, info->bytes_pinned,
7683                info->bytes_reserved, info->bytes_may_use,
7684                info->bytes_readonly);
7685         spin_unlock(&info->lock);
7686
7687         if (!dump_block_groups)
7688                 return;
7689
7690         down_read(&info->groups_sem);
7691 again:
7692         list_for_each_entry(cache, &info->block_groups[index], list) {
7693                 spin_lock(&cache->lock);
7694                 printk(KERN_INFO "BTRFS: "
7695                            "block group %llu has %llu bytes, "
7696                            "%llu used %llu pinned %llu reserved %s\n",
7697                        cache->key.objectid, cache->key.offset,
7698                        btrfs_block_group_used(&cache->item), cache->pinned,
7699                        cache->reserved, cache->ro ? "[readonly]" : "");
7700                 btrfs_dump_free_space(cache, bytes);
7701                 spin_unlock(&cache->lock);
7702         }
7703         if (++index < BTRFS_NR_RAID_TYPES)
7704                 goto again;
7705         up_read(&info->groups_sem);
7706 }
7707
7708 int btrfs_reserve_extent(struct btrfs_root *root,
7709                          u64 num_bytes, u64 min_alloc_size,
7710                          u64 empty_size, u64 hint_byte,
7711                          struct btrfs_key *ins, int is_data, int delalloc)
7712 {
7713         bool final_tried = num_bytes == min_alloc_size;
7714         u64 flags;
7715         int ret;
7716
7717         flags = btrfs_get_alloc_profile(root, is_data);
7718 again:
7719         WARN_ON(num_bytes < root->sectorsize);
7720         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7721                                flags, delalloc);
7722         if (!ret && !is_data) {
7723                 btrfs_dec_block_group_reservations(root->fs_info,
7724                                                    ins->objectid);
7725         } else if (ret == -ENOSPC) {
7726                 if (!final_tried && ins->offset) {
7727                         num_bytes = min(num_bytes >> 1, ins->offset);
7728                         num_bytes = round_down(num_bytes, root->sectorsize);
7729                         num_bytes = max(num_bytes, min_alloc_size);
7730                         if (num_bytes == min_alloc_size)
7731                                 final_tried = true;
7732                         goto again;
7733                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7734                         struct btrfs_space_info *sinfo;
7735
7736                         sinfo = __find_space_info(root->fs_info, flags);
7737                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7738                                 flags, num_bytes);
7739                         if (sinfo)
7740                                 dump_space_info(sinfo, num_bytes, 1);
7741                 }
7742         }
7743
7744         return ret;
7745 }
7746
7747 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7748                                         u64 start, u64 len,
7749                                         int pin, int delalloc)
7750 {
7751         struct btrfs_block_group_cache *cache;
7752         int ret = 0;
7753
7754         cache = btrfs_lookup_block_group(root->fs_info, start);
7755         if (!cache) {
7756                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7757                         start);
7758                 return -ENOSPC;
7759         }
7760
7761         if (pin)
7762                 pin_down_extent(root, cache, start, len, 1);
7763         else {
7764                 if (btrfs_test_opt(root, DISCARD))
7765                         ret = btrfs_discard_extent(root, start, len, NULL);
7766                 btrfs_add_free_space(cache, start, len);
7767                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7768         }
7769
7770         btrfs_put_block_group(cache);
7771
7772         trace_btrfs_reserved_extent_free(root, start, len);
7773
7774         return ret;
7775 }
7776
7777 int btrfs_free_reserved_extent(struct btrfs_root *root,
7778                                u64 start, u64 len, int delalloc)
7779 {
7780         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7781 }
7782
7783 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7784                                        u64 start, u64 len)
7785 {
7786         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7787 }
7788
7789 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7790                                       struct btrfs_root *root,
7791                                       u64 parent, u64 root_objectid,
7792                                       u64 flags, u64 owner, u64 offset,
7793                                       struct btrfs_key *ins, int ref_mod)
7794 {
7795         int ret;
7796         struct btrfs_fs_info *fs_info = root->fs_info;
7797         struct btrfs_extent_item *extent_item;
7798         struct btrfs_extent_inline_ref *iref;
7799         struct btrfs_path *path;
7800         struct extent_buffer *leaf;
7801         int type;
7802         u32 size;
7803
7804         if (parent > 0)
7805                 type = BTRFS_SHARED_DATA_REF_KEY;
7806         else
7807                 type = BTRFS_EXTENT_DATA_REF_KEY;
7808
7809         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7810
7811         path = btrfs_alloc_path();
7812         if (!path)
7813                 return -ENOMEM;
7814
7815         path->leave_spinning = 1;
7816         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7817                                       ins, size);
7818         if (ret) {
7819                 btrfs_free_path(path);
7820                 return ret;
7821         }
7822
7823         leaf = path->nodes[0];
7824         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7825                                      struct btrfs_extent_item);
7826         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7827         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7828         btrfs_set_extent_flags(leaf, extent_item,
7829                                flags | BTRFS_EXTENT_FLAG_DATA);
7830
7831         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7832         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7833         if (parent > 0) {
7834                 struct btrfs_shared_data_ref *ref;
7835                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7836                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7837                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7838         } else {
7839                 struct btrfs_extent_data_ref *ref;
7840                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7841                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7842                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7843                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7844                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7845         }
7846
7847         btrfs_mark_buffer_dirty(path->nodes[0]);
7848         btrfs_free_path(path);
7849
7850         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
7851                                           ins->offset);
7852         if (ret)
7853                 return ret;
7854
7855         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7856         if (ret) { /* -ENOENT, logic error */
7857                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7858                         ins->objectid, ins->offset);
7859                 BUG();
7860         }
7861         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7862         return ret;
7863 }
7864
7865 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7866                                      struct btrfs_root *root,
7867                                      u64 parent, u64 root_objectid,
7868                                      u64 flags, struct btrfs_disk_key *key,
7869                                      int level, struct btrfs_key *ins)
7870 {
7871         int ret;
7872         struct btrfs_fs_info *fs_info = root->fs_info;
7873         struct btrfs_extent_item *extent_item;
7874         struct btrfs_tree_block_info *block_info;
7875         struct btrfs_extent_inline_ref *iref;
7876         struct btrfs_path *path;
7877         struct extent_buffer *leaf;
7878         u32 size = sizeof(*extent_item) + sizeof(*iref);
7879         u64 num_bytes = ins->offset;
7880         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7881                                                  SKINNY_METADATA);
7882
7883         if (!skinny_metadata)
7884                 size += sizeof(*block_info);
7885
7886         path = btrfs_alloc_path();
7887         if (!path) {
7888                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7889                                                    root->nodesize);
7890                 return -ENOMEM;
7891         }
7892
7893         path->leave_spinning = 1;
7894         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7895                                       ins, size);
7896         if (ret) {
7897                 btrfs_free_path(path);
7898                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7899                                                    root->nodesize);
7900                 return ret;
7901         }
7902
7903         leaf = path->nodes[0];
7904         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7905                                      struct btrfs_extent_item);
7906         btrfs_set_extent_refs(leaf, extent_item, 1);
7907         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7908         btrfs_set_extent_flags(leaf, extent_item,
7909                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7910
7911         if (skinny_metadata) {
7912                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7913                 num_bytes = root->nodesize;
7914         } else {
7915                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7916                 btrfs_set_tree_block_key(leaf, block_info, key);
7917                 btrfs_set_tree_block_level(leaf, block_info, level);
7918                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7919         }
7920
7921         if (parent > 0) {
7922                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7923                 btrfs_set_extent_inline_ref_type(leaf, iref,
7924                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7925                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7926         } else {
7927                 btrfs_set_extent_inline_ref_type(leaf, iref,
7928                                                  BTRFS_TREE_BLOCK_REF_KEY);
7929                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7930         }
7931
7932         btrfs_mark_buffer_dirty(leaf);
7933         btrfs_free_path(path);
7934
7935         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
7936                                           num_bytes);
7937         if (ret)
7938                 return ret;
7939
7940         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7941                                  1);
7942         if (ret) { /* -ENOENT, logic error */
7943                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7944                         ins->objectid, ins->offset);
7945                 BUG();
7946         }
7947
7948         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7949         return ret;
7950 }
7951
7952 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7953                                      struct btrfs_root *root,
7954                                      u64 root_objectid, u64 owner,
7955                                      u64 offset, u64 ram_bytes,
7956                                      struct btrfs_key *ins)
7957 {
7958         int ret;
7959
7960         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7961
7962         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7963                                          ins->offset, 0,
7964                                          root_objectid, owner, offset,
7965                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7966                                          NULL);
7967         return ret;
7968 }
7969
7970 /*
7971  * this is used by the tree logging recovery code.  It records that
7972  * an extent has been allocated and makes sure to clear the free
7973  * space cache bits as well
7974  */
7975 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7976                                    struct btrfs_root *root,
7977                                    u64 root_objectid, u64 owner, u64 offset,
7978                                    struct btrfs_key *ins)
7979 {
7980         int ret;
7981         struct btrfs_block_group_cache *block_group;
7982
7983         /*
7984          * Mixed block groups will exclude before processing the log so we only
7985          * need to do the exclude dance if this fs isn't mixed.
7986          */
7987         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7988                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7989                 if (ret)
7990                         return ret;
7991         }
7992
7993         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7994         if (!block_group)
7995                 return -EINVAL;
7996
7997         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7998                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7999         BUG_ON(ret); /* logic error */
8000         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
8001                                          0, owner, offset, ins, 1);
8002         btrfs_put_block_group(block_group);
8003         return ret;
8004 }
8005
8006 static struct extent_buffer *
8007 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8008                       u64 bytenr, int level)
8009 {
8010         struct extent_buffer *buf;
8011
8012         buf = btrfs_find_create_tree_block(root, bytenr);
8013         if (!buf)
8014                 return ERR_PTR(-ENOMEM);
8015         btrfs_set_header_generation(buf, trans->transid);
8016         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8017         btrfs_tree_lock(buf);
8018         clean_tree_block(trans, root->fs_info, buf);
8019         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8020
8021         btrfs_set_lock_blocking(buf);
8022         set_extent_buffer_uptodate(buf);
8023
8024         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8025                 buf->log_index = root->log_transid % 2;
8026                 /*
8027                  * we allow two log transactions at a time, use different
8028                  * EXENT bit to differentiate dirty pages.
8029                  */
8030                 if (buf->log_index == 0)
8031                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8032                                         buf->start + buf->len - 1, GFP_NOFS);
8033                 else
8034                         set_extent_new(&root->dirty_log_pages, buf->start,
8035                                         buf->start + buf->len - 1);
8036         } else {
8037                 buf->log_index = -1;
8038                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8039                          buf->start + buf->len - 1, GFP_NOFS);
8040         }
8041         trans->blocks_used++;
8042         /* this returns a buffer locked for blocking */
8043         return buf;
8044 }
8045
8046 static struct btrfs_block_rsv *
8047 use_block_rsv(struct btrfs_trans_handle *trans,
8048               struct btrfs_root *root, u32 blocksize)
8049 {
8050         struct btrfs_block_rsv *block_rsv;
8051         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
8052         int ret;
8053         bool global_updated = false;
8054
8055         block_rsv = get_block_rsv(trans, root);
8056
8057         if (unlikely(block_rsv->size == 0))
8058                 goto try_reserve;
8059 again:
8060         ret = block_rsv_use_bytes(block_rsv, blocksize);
8061         if (!ret)
8062                 return block_rsv;
8063
8064         if (block_rsv->failfast)
8065                 return ERR_PTR(ret);
8066
8067         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8068                 global_updated = true;
8069                 update_global_block_rsv(root->fs_info);
8070                 goto again;
8071         }
8072
8073         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
8074                 static DEFINE_RATELIMIT_STATE(_rs,
8075                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8076                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8077                 if (__ratelimit(&_rs))
8078                         WARN(1, KERN_DEBUG
8079                                 "BTRFS: block rsv returned %d\n", ret);
8080         }
8081 try_reserve:
8082         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8083                                      BTRFS_RESERVE_NO_FLUSH);
8084         if (!ret)
8085                 return block_rsv;
8086         /*
8087          * If we couldn't reserve metadata bytes try and use some from
8088          * the global reserve if its space type is the same as the global
8089          * reservation.
8090          */
8091         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8092             block_rsv->space_info == global_rsv->space_info) {
8093                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8094                 if (!ret)
8095                         return global_rsv;
8096         }
8097         return ERR_PTR(ret);
8098 }
8099
8100 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8101                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8102 {
8103         block_rsv_add_bytes(block_rsv, blocksize, 0);
8104         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8105 }
8106
8107 /*
8108  * finds a free extent and does all the dirty work required for allocation
8109  * returns the tree buffer or an ERR_PTR on error.
8110  */
8111 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8112                                         struct btrfs_root *root,
8113                                         u64 parent, u64 root_objectid,
8114                                         struct btrfs_disk_key *key, int level,
8115                                         u64 hint, u64 empty_size)
8116 {
8117         struct btrfs_key ins;
8118         struct btrfs_block_rsv *block_rsv;
8119         struct extent_buffer *buf;
8120         struct btrfs_delayed_extent_op *extent_op;
8121         u64 flags = 0;
8122         int ret;
8123         u32 blocksize = root->nodesize;
8124         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8125                                                  SKINNY_METADATA);
8126
8127         if (btrfs_test_is_dummy_root(root)) {
8128                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8129                                             level);
8130                 if (!IS_ERR(buf))
8131                         root->alloc_bytenr += blocksize;
8132                 return buf;
8133         }
8134
8135         block_rsv = use_block_rsv(trans, root, blocksize);
8136         if (IS_ERR(block_rsv))
8137                 return ERR_CAST(block_rsv);
8138
8139         ret = btrfs_reserve_extent(root, blocksize, blocksize,
8140                                    empty_size, hint, &ins, 0, 0);
8141         if (ret)
8142                 goto out_unuse;
8143
8144         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8145         if (IS_ERR(buf)) {
8146                 ret = PTR_ERR(buf);
8147                 goto out_free_reserved;
8148         }
8149
8150         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8151                 if (parent == 0)
8152                         parent = ins.objectid;
8153                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8154         } else
8155                 BUG_ON(parent > 0);
8156
8157         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8158                 extent_op = btrfs_alloc_delayed_extent_op();
8159                 if (!extent_op) {
8160                         ret = -ENOMEM;
8161                         goto out_free_buf;
8162                 }
8163                 if (key)
8164                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8165                 else
8166                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8167                 extent_op->flags_to_set = flags;
8168                 extent_op->update_key = skinny_metadata ? false : true;
8169                 extent_op->update_flags = true;
8170                 extent_op->is_data = false;
8171                 extent_op->level = level;
8172
8173                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8174                                                  ins.objectid, ins.offset,
8175                                                  parent, root_objectid, level,
8176                                                  BTRFS_ADD_DELAYED_EXTENT,
8177                                                  extent_op);
8178                 if (ret)
8179                         goto out_free_delayed;
8180         }
8181         return buf;
8182
8183 out_free_delayed:
8184         btrfs_free_delayed_extent_op(extent_op);
8185 out_free_buf:
8186         free_extent_buffer(buf);
8187 out_free_reserved:
8188         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8189 out_unuse:
8190         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8191         return ERR_PTR(ret);
8192 }
8193
8194 struct walk_control {
8195         u64 refs[BTRFS_MAX_LEVEL];
8196         u64 flags[BTRFS_MAX_LEVEL];
8197         struct btrfs_key update_progress;
8198         int stage;
8199         int level;
8200         int shared_level;
8201         int update_ref;
8202         int keep_locks;
8203         int reada_slot;
8204         int reada_count;
8205         int for_reloc;
8206 };
8207
8208 #define DROP_REFERENCE  1
8209 #define UPDATE_BACKREF  2
8210
8211 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8212                                      struct btrfs_root *root,
8213                                      struct walk_control *wc,
8214                                      struct btrfs_path *path)
8215 {
8216         u64 bytenr;
8217         u64 generation;
8218         u64 refs;
8219         u64 flags;
8220         u32 nritems;
8221         u32 blocksize;
8222         struct btrfs_key key;
8223         struct extent_buffer *eb;
8224         int ret;
8225         int slot;
8226         int nread = 0;
8227
8228         if (path->slots[wc->level] < wc->reada_slot) {
8229                 wc->reada_count = wc->reada_count * 2 / 3;
8230                 wc->reada_count = max(wc->reada_count, 2);
8231         } else {
8232                 wc->reada_count = wc->reada_count * 3 / 2;
8233                 wc->reada_count = min_t(int, wc->reada_count,
8234                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8235         }
8236
8237         eb = path->nodes[wc->level];
8238         nritems = btrfs_header_nritems(eb);
8239         blocksize = root->nodesize;
8240
8241         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8242                 if (nread >= wc->reada_count)
8243                         break;
8244
8245                 cond_resched();
8246                 bytenr = btrfs_node_blockptr(eb, slot);
8247                 generation = btrfs_node_ptr_generation(eb, slot);
8248
8249                 if (slot == path->slots[wc->level])
8250                         goto reada;
8251
8252                 if (wc->stage == UPDATE_BACKREF &&
8253                     generation <= root->root_key.offset)
8254                         continue;
8255
8256                 /* We don't lock the tree block, it's OK to be racy here */
8257                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8258                                                wc->level - 1, 1, &refs,
8259                                                &flags);
8260                 /* We don't care about errors in readahead. */
8261                 if (ret < 0)
8262                         continue;
8263                 BUG_ON(refs == 0);
8264
8265                 if (wc->stage == DROP_REFERENCE) {
8266                         if (refs == 1)
8267                                 goto reada;
8268
8269                         if (wc->level == 1 &&
8270                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8271                                 continue;
8272                         if (!wc->update_ref ||
8273                             generation <= root->root_key.offset)
8274                                 continue;
8275                         btrfs_node_key_to_cpu(eb, &key, slot);
8276                         ret = btrfs_comp_cpu_keys(&key,
8277                                                   &wc->update_progress);
8278                         if (ret < 0)
8279                                 continue;
8280                 } else {
8281                         if (wc->level == 1 &&
8282                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8283                                 continue;
8284                 }
8285 reada:
8286                 readahead_tree_block(root, bytenr);
8287                 nread++;
8288         }
8289         wc->reada_slot = slot;
8290 }
8291
8292 /*
8293  * These may not be seen by the usual inc/dec ref code so we have to
8294  * add them here.
8295  */
8296 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8297                                      struct btrfs_root *root, u64 bytenr,
8298                                      u64 num_bytes)
8299 {
8300         struct btrfs_qgroup_extent_record *qrecord;
8301         struct btrfs_delayed_ref_root *delayed_refs;
8302
8303         qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8304         if (!qrecord)
8305                 return -ENOMEM;
8306
8307         qrecord->bytenr = bytenr;
8308         qrecord->num_bytes = num_bytes;
8309         qrecord->old_roots = NULL;
8310
8311         delayed_refs = &trans->transaction->delayed_refs;
8312         spin_lock(&delayed_refs->lock);
8313         if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8314                 kfree(qrecord);
8315         spin_unlock(&delayed_refs->lock);
8316
8317         return 0;
8318 }
8319
8320 static int account_leaf_items(struct btrfs_trans_handle *trans,
8321                               struct btrfs_root *root,
8322                               struct extent_buffer *eb)
8323 {
8324         int nr = btrfs_header_nritems(eb);
8325         int i, extent_type, ret;
8326         struct btrfs_key key;
8327         struct btrfs_file_extent_item *fi;
8328         u64 bytenr, num_bytes;
8329
8330         /* We can be called directly from walk_up_proc() */
8331         if (!root->fs_info->quota_enabled)
8332                 return 0;
8333
8334         for (i = 0; i < nr; i++) {
8335                 btrfs_item_key_to_cpu(eb, &key, i);
8336
8337                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8338                         continue;
8339
8340                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8341                 /* filter out non qgroup-accountable extents  */
8342                 extent_type = btrfs_file_extent_type(eb, fi);
8343
8344                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8345                         continue;
8346
8347                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8348                 if (!bytenr)
8349                         continue;
8350
8351                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8352
8353                 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8354                 if (ret)
8355                         return ret;
8356         }
8357         return 0;
8358 }
8359
8360 /*
8361  * Walk up the tree from the bottom, freeing leaves and any interior
8362  * nodes which have had all slots visited. If a node (leaf or
8363  * interior) is freed, the node above it will have it's slot
8364  * incremented. The root node will never be freed.
8365  *
8366  * At the end of this function, we should have a path which has all
8367  * slots incremented to the next position for a search. If we need to
8368  * read a new node it will be NULL and the node above it will have the
8369  * correct slot selected for a later read.
8370  *
8371  * If we increment the root nodes slot counter past the number of
8372  * elements, 1 is returned to signal completion of the search.
8373  */
8374 static int adjust_slots_upwards(struct btrfs_root *root,
8375                                 struct btrfs_path *path, int root_level)
8376 {
8377         int level = 0;
8378         int nr, slot;
8379         struct extent_buffer *eb;
8380
8381         if (root_level == 0)
8382                 return 1;
8383
8384         while (level <= root_level) {
8385                 eb = path->nodes[level];
8386                 nr = btrfs_header_nritems(eb);
8387                 path->slots[level]++;
8388                 slot = path->slots[level];
8389                 if (slot >= nr || level == 0) {
8390                         /*
8391                          * Don't free the root -  we will detect this
8392                          * condition after our loop and return a
8393                          * positive value for caller to stop walking the tree.
8394                          */
8395                         if (level != root_level) {
8396                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8397                                 path->locks[level] = 0;
8398
8399                                 free_extent_buffer(eb);
8400                                 path->nodes[level] = NULL;
8401                                 path->slots[level] = 0;
8402                         }
8403                 } else {
8404                         /*
8405                          * We have a valid slot to walk back down
8406                          * from. Stop here so caller can process these
8407                          * new nodes.
8408                          */
8409                         break;
8410                 }
8411
8412                 level++;
8413         }
8414
8415         eb = path->nodes[root_level];
8416         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8417                 return 1;
8418
8419         return 0;
8420 }
8421
8422 /*
8423  * root_eb is the subtree root and is locked before this function is called.
8424  */
8425 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8426                                   struct btrfs_root *root,
8427                                   struct extent_buffer *root_eb,
8428                                   u64 root_gen,
8429                                   int root_level)
8430 {
8431         int ret = 0;
8432         int level;
8433         struct extent_buffer *eb = root_eb;
8434         struct btrfs_path *path = NULL;
8435
8436         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8437         BUG_ON(root_eb == NULL);
8438
8439         if (!root->fs_info->quota_enabled)
8440                 return 0;
8441
8442         if (!extent_buffer_uptodate(root_eb)) {
8443                 ret = btrfs_read_buffer(root_eb, root_gen);
8444                 if (ret)
8445                         goto out;
8446         }
8447
8448         if (root_level == 0) {
8449                 ret = account_leaf_items(trans, root, root_eb);
8450                 goto out;
8451         }
8452
8453         path = btrfs_alloc_path();
8454         if (!path)
8455                 return -ENOMEM;
8456
8457         /*
8458          * Walk down the tree.  Missing extent blocks are filled in as
8459          * we go. Metadata is accounted every time we read a new
8460          * extent block.
8461          *
8462          * When we reach a leaf, we account for file extent items in it,
8463          * walk back up the tree (adjusting slot pointers as we go)
8464          * and restart the search process.
8465          */
8466         extent_buffer_get(root_eb); /* For path */
8467         path->nodes[root_level] = root_eb;
8468         path->slots[root_level] = 0;
8469         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8470 walk_down:
8471         level = root_level;
8472         while (level >= 0) {
8473                 if (path->nodes[level] == NULL) {
8474                         int parent_slot;
8475                         u64 child_gen;
8476                         u64 child_bytenr;
8477
8478                         /* We need to get child blockptr/gen from
8479                          * parent before we can read it. */
8480                         eb = path->nodes[level + 1];
8481                         parent_slot = path->slots[level + 1];
8482                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8483                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8484
8485                         eb = read_tree_block(root, child_bytenr, child_gen);
8486                         if (IS_ERR(eb)) {
8487                                 ret = PTR_ERR(eb);
8488                                 goto out;
8489                         } else if (!extent_buffer_uptodate(eb)) {
8490                                 free_extent_buffer(eb);
8491                                 ret = -EIO;
8492                                 goto out;
8493                         }
8494
8495                         path->nodes[level] = eb;
8496                         path->slots[level] = 0;
8497
8498                         btrfs_tree_read_lock(eb);
8499                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8500                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8501
8502                         ret = record_one_subtree_extent(trans, root, child_bytenr,
8503                                                         root->nodesize);
8504                         if (ret)
8505                                 goto out;
8506                 }
8507
8508                 if (level == 0) {
8509                         ret = account_leaf_items(trans, root, path->nodes[level]);
8510                         if (ret)
8511                                 goto out;
8512
8513                         /* Nonzero return here means we completed our search */
8514                         ret = adjust_slots_upwards(root, path, root_level);
8515                         if (ret)
8516                                 break;
8517
8518                         /* Restart search with new slots */
8519                         goto walk_down;
8520                 }
8521
8522                 level--;
8523         }
8524
8525         ret = 0;
8526 out:
8527         btrfs_free_path(path);
8528
8529         return ret;
8530 }
8531
8532 /*
8533  * helper to process tree block while walking down the tree.
8534  *
8535  * when wc->stage == UPDATE_BACKREF, this function updates
8536  * back refs for pointers in the block.
8537  *
8538  * NOTE: return value 1 means we should stop walking down.
8539  */
8540 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8541                                    struct btrfs_root *root,
8542                                    struct btrfs_path *path,
8543                                    struct walk_control *wc, int lookup_info)
8544 {
8545         int level = wc->level;
8546         struct extent_buffer *eb = path->nodes[level];
8547         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8548         int ret;
8549
8550         if (wc->stage == UPDATE_BACKREF &&
8551             btrfs_header_owner(eb) != root->root_key.objectid)
8552                 return 1;
8553
8554         /*
8555          * when reference count of tree block is 1, it won't increase
8556          * again. once full backref flag is set, we never clear it.
8557          */
8558         if (lookup_info &&
8559             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8560              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8561                 BUG_ON(!path->locks[level]);
8562                 ret = btrfs_lookup_extent_info(trans, root,
8563                                                eb->start, level, 1,
8564                                                &wc->refs[level],
8565                                                &wc->flags[level]);
8566                 BUG_ON(ret == -ENOMEM);
8567                 if (ret)
8568                         return ret;
8569                 BUG_ON(wc->refs[level] == 0);
8570         }
8571
8572         if (wc->stage == DROP_REFERENCE) {
8573                 if (wc->refs[level] > 1)
8574                         return 1;
8575
8576                 if (path->locks[level] && !wc->keep_locks) {
8577                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8578                         path->locks[level] = 0;
8579                 }
8580                 return 0;
8581         }
8582
8583         /* wc->stage == UPDATE_BACKREF */
8584         if (!(wc->flags[level] & flag)) {
8585                 BUG_ON(!path->locks[level]);
8586                 ret = btrfs_inc_ref(trans, root, eb, 1);
8587                 BUG_ON(ret); /* -ENOMEM */
8588                 ret = btrfs_dec_ref(trans, root, eb, 0);
8589                 BUG_ON(ret); /* -ENOMEM */
8590                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8591                                                   eb->len, flag,
8592                                                   btrfs_header_level(eb), 0);
8593                 BUG_ON(ret); /* -ENOMEM */
8594                 wc->flags[level] |= flag;
8595         }
8596
8597         /*
8598          * the block is shared by multiple trees, so it's not good to
8599          * keep the tree lock
8600          */
8601         if (path->locks[level] && level > 0) {
8602                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8603                 path->locks[level] = 0;
8604         }
8605         return 0;
8606 }
8607
8608 /*
8609  * helper to process tree block pointer.
8610  *
8611  * when wc->stage == DROP_REFERENCE, this function checks
8612  * reference count of the block pointed to. if the block
8613  * is shared and we need update back refs for the subtree
8614  * rooted at the block, this function changes wc->stage to
8615  * UPDATE_BACKREF. if the block is shared and there is no
8616  * need to update back, this function drops the reference
8617  * to the block.
8618  *
8619  * NOTE: return value 1 means we should stop walking down.
8620  */
8621 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8622                                  struct btrfs_root *root,
8623                                  struct btrfs_path *path,
8624                                  struct walk_control *wc, int *lookup_info)
8625 {
8626         u64 bytenr;
8627         u64 generation;
8628         u64 parent;
8629         u32 blocksize;
8630         struct btrfs_key key;
8631         struct extent_buffer *next;
8632         int level = wc->level;
8633         int reada = 0;
8634         int ret = 0;
8635         bool need_account = false;
8636
8637         generation = btrfs_node_ptr_generation(path->nodes[level],
8638                                                path->slots[level]);
8639         /*
8640          * if the lower level block was created before the snapshot
8641          * was created, we know there is no need to update back refs
8642          * for the subtree
8643          */
8644         if (wc->stage == UPDATE_BACKREF &&
8645             generation <= root->root_key.offset) {
8646                 *lookup_info = 1;
8647                 return 1;
8648         }
8649
8650         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8651         blocksize = root->nodesize;
8652
8653         next = btrfs_find_tree_block(root->fs_info, bytenr);
8654         if (!next) {
8655                 next = btrfs_find_create_tree_block(root, bytenr);
8656                 if (!next)
8657                         return -ENOMEM;
8658                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8659                                                level - 1);
8660                 reada = 1;
8661         }
8662         btrfs_tree_lock(next);
8663         btrfs_set_lock_blocking(next);
8664
8665         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8666                                        &wc->refs[level - 1],
8667                                        &wc->flags[level - 1]);
8668         if (ret < 0) {
8669                 btrfs_tree_unlock(next);
8670                 return ret;
8671         }
8672
8673         if (unlikely(wc->refs[level - 1] == 0)) {
8674                 btrfs_err(root->fs_info, "Missing references.");
8675                 BUG();
8676         }
8677         *lookup_info = 0;
8678
8679         if (wc->stage == DROP_REFERENCE) {
8680                 if (wc->refs[level - 1] > 1) {
8681                         need_account = true;
8682                         if (level == 1 &&
8683                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8684                                 goto skip;
8685
8686                         if (!wc->update_ref ||
8687                             generation <= root->root_key.offset)
8688                                 goto skip;
8689
8690                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8691                                               path->slots[level]);
8692                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8693                         if (ret < 0)
8694                                 goto skip;
8695
8696                         wc->stage = UPDATE_BACKREF;
8697                         wc->shared_level = level - 1;
8698                 }
8699         } else {
8700                 if (level == 1 &&
8701                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8702                         goto skip;
8703         }
8704
8705         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8706                 btrfs_tree_unlock(next);
8707                 free_extent_buffer(next);
8708                 next = NULL;
8709                 *lookup_info = 1;
8710         }
8711
8712         if (!next) {
8713                 if (reada && level == 1)
8714                         reada_walk_down(trans, root, wc, path);
8715                 next = read_tree_block(root, bytenr, generation);
8716                 if (IS_ERR(next)) {
8717                         return PTR_ERR(next);
8718                 } else if (!extent_buffer_uptodate(next)) {
8719                         free_extent_buffer(next);
8720                         return -EIO;
8721                 }
8722                 btrfs_tree_lock(next);
8723                 btrfs_set_lock_blocking(next);
8724         }
8725
8726         level--;
8727         BUG_ON(level != btrfs_header_level(next));
8728         path->nodes[level] = next;
8729         path->slots[level] = 0;
8730         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8731         wc->level = level;
8732         if (wc->level == 1)
8733                 wc->reada_slot = 0;
8734         return 0;
8735 skip:
8736         wc->refs[level - 1] = 0;
8737         wc->flags[level - 1] = 0;
8738         if (wc->stage == DROP_REFERENCE) {
8739                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8740                         parent = path->nodes[level]->start;
8741                 } else {
8742                         BUG_ON(root->root_key.objectid !=
8743                                btrfs_header_owner(path->nodes[level]));
8744                         parent = 0;
8745                 }
8746
8747                 if (need_account) {
8748                         ret = account_shared_subtree(trans, root, next,
8749                                                      generation, level - 1);
8750                         if (ret) {
8751                                 btrfs_err_rl(root->fs_info,
8752                                         "Error "
8753                                         "%d accounting shared subtree. Quota "
8754                                         "is out of sync, rescan required.",
8755                                         ret);
8756                         }
8757                 }
8758                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8759                                 root->root_key.objectid, level - 1, 0);
8760                 BUG_ON(ret); /* -ENOMEM */
8761         }
8762         btrfs_tree_unlock(next);
8763         free_extent_buffer(next);
8764         *lookup_info = 1;
8765         return 1;
8766 }
8767
8768 /*
8769  * helper to process tree block while walking up the tree.
8770  *
8771  * when wc->stage == DROP_REFERENCE, this function drops
8772  * reference count on the block.
8773  *
8774  * when wc->stage == UPDATE_BACKREF, this function changes
8775  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8776  * to UPDATE_BACKREF previously while processing the block.
8777  *
8778  * NOTE: return value 1 means we should stop walking up.
8779  */
8780 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8781                                  struct btrfs_root *root,
8782                                  struct btrfs_path *path,
8783                                  struct walk_control *wc)
8784 {
8785         int ret;
8786         int level = wc->level;
8787         struct extent_buffer *eb = path->nodes[level];
8788         u64 parent = 0;
8789
8790         if (wc->stage == UPDATE_BACKREF) {
8791                 BUG_ON(wc->shared_level < level);
8792                 if (level < wc->shared_level)
8793                         goto out;
8794
8795                 ret = find_next_key(path, level + 1, &wc->update_progress);
8796                 if (ret > 0)
8797                         wc->update_ref = 0;
8798
8799                 wc->stage = DROP_REFERENCE;
8800                 wc->shared_level = -1;
8801                 path->slots[level] = 0;
8802
8803                 /*
8804                  * check reference count again if the block isn't locked.
8805                  * we should start walking down the tree again if reference
8806                  * count is one.
8807                  */
8808                 if (!path->locks[level]) {
8809                         BUG_ON(level == 0);
8810                         btrfs_tree_lock(eb);
8811                         btrfs_set_lock_blocking(eb);
8812                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8813
8814                         ret = btrfs_lookup_extent_info(trans, root,
8815                                                        eb->start, level, 1,
8816                                                        &wc->refs[level],
8817                                                        &wc->flags[level]);
8818                         if (ret < 0) {
8819                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8820                                 path->locks[level] = 0;
8821                                 return ret;
8822                         }
8823                         BUG_ON(wc->refs[level] == 0);
8824                         if (wc->refs[level] == 1) {
8825                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8826                                 path->locks[level] = 0;
8827                                 return 1;
8828                         }
8829                 }
8830         }
8831
8832         /* wc->stage == DROP_REFERENCE */
8833         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8834
8835         if (wc->refs[level] == 1) {
8836                 if (level == 0) {
8837                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8838                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8839                         else
8840                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8841                         BUG_ON(ret); /* -ENOMEM */
8842                         ret = account_leaf_items(trans, root, eb);
8843                         if (ret) {
8844                                 btrfs_err_rl(root->fs_info,
8845                                         "error "
8846                                         "%d accounting leaf items. Quota "
8847                                         "is out of sync, rescan required.",
8848                                         ret);
8849                         }
8850                 }
8851                 /* make block locked assertion in clean_tree_block happy */
8852                 if (!path->locks[level] &&
8853                     btrfs_header_generation(eb) == trans->transid) {
8854                         btrfs_tree_lock(eb);
8855                         btrfs_set_lock_blocking(eb);
8856                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8857                 }
8858                 clean_tree_block(trans, root->fs_info, eb);
8859         }
8860
8861         if (eb == root->node) {
8862                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8863                         parent = eb->start;
8864                 else
8865                         BUG_ON(root->root_key.objectid !=
8866                                btrfs_header_owner(eb));
8867         } else {
8868                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8869                         parent = path->nodes[level + 1]->start;
8870                 else
8871                         BUG_ON(root->root_key.objectid !=
8872                                btrfs_header_owner(path->nodes[level + 1]));
8873         }
8874
8875         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8876 out:
8877         wc->refs[level] = 0;
8878         wc->flags[level] = 0;
8879         return 0;
8880 }
8881
8882 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8883                                    struct btrfs_root *root,
8884                                    struct btrfs_path *path,
8885                                    struct walk_control *wc)
8886 {
8887         int level = wc->level;
8888         int lookup_info = 1;
8889         int ret;
8890
8891         while (level >= 0) {
8892                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8893                 if (ret > 0)
8894                         break;
8895
8896                 if (level == 0)
8897                         break;
8898
8899                 if (path->slots[level] >=
8900                     btrfs_header_nritems(path->nodes[level]))
8901                         break;
8902
8903                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8904                 if (ret > 0) {
8905                         path->slots[level]++;
8906                         continue;
8907                 } else if (ret < 0)
8908                         return ret;
8909                 level = wc->level;
8910         }
8911         return 0;
8912 }
8913
8914 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8915                                  struct btrfs_root *root,
8916                                  struct btrfs_path *path,
8917                                  struct walk_control *wc, int max_level)
8918 {
8919         int level = wc->level;
8920         int ret;
8921
8922         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8923         while (level < max_level && path->nodes[level]) {
8924                 wc->level = level;
8925                 if (path->slots[level] + 1 <
8926                     btrfs_header_nritems(path->nodes[level])) {
8927                         path->slots[level]++;
8928                         return 0;
8929                 } else {
8930                         ret = walk_up_proc(trans, root, path, wc);
8931                         if (ret > 0)
8932                                 return 0;
8933
8934                         if (path->locks[level]) {
8935                                 btrfs_tree_unlock_rw(path->nodes[level],
8936                                                      path->locks[level]);
8937                                 path->locks[level] = 0;
8938                         }
8939                         free_extent_buffer(path->nodes[level]);
8940                         path->nodes[level] = NULL;
8941                         level++;
8942                 }
8943         }
8944         return 1;
8945 }
8946
8947 /*
8948  * drop a subvolume tree.
8949  *
8950  * this function traverses the tree freeing any blocks that only
8951  * referenced by the tree.
8952  *
8953  * when a shared tree block is found. this function decreases its
8954  * reference count by one. if update_ref is true, this function
8955  * also make sure backrefs for the shared block and all lower level
8956  * blocks are properly updated.
8957  *
8958  * If called with for_reloc == 0, may exit early with -EAGAIN
8959  */
8960 int btrfs_drop_snapshot(struct btrfs_root *root,
8961                          struct btrfs_block_rsv *block_rsv, int update_ref,
8962                          int for_reloc)
8963 {
8964         struct btrfs_path *path;
8965         struct btrfs_trans_handle *trans;
8966         struct btrfs_root *tree_root = root->fs_info->tree_root;
8967         struct btrfs_root_item *root_item = &root->root_item;
8968         struct walk_control *wc;
8969         struct btrfs_key key;
8970         int err = 0;
8971         int ret;
8972         int level;
8973         bool root_dropped = false;
8974
8975         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8976
8977         path = btrfs_alloc_path();
8978         if (!path) {
8979                 err = -ENOMEM;
8980                 goto out;
8981         }
8982
8983         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8984         if (!wc) {
8985                 btrfs_free_path(path);
8986                 err = -ENOMEM;
8987                 goto out;
8988         }
8989
8990         trans = btrfs_start_transaction(tree_root, 0);
8991         if (IS_ERR(trans)) {
8992                 err = PTR_ERR(trans);
8993                 goto out_free;
8994         }
8995
8996         if (block_rsv)
8997                 trans->block_rsv = block_rsv;
8998
8999         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9000                 level = btrfs_header_level(root->node);
9001                 path->nodes[level] = btrfs_lock_root_node(root);
9002                 btrfs_set_lock_blocking(path->nodes[level]);
9003                 path->slots[level] = 0;
9004                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9005                 memset(&wc->update_progress, 0,
9006                        sizeof(wc->update_progress));
9007         } else {
9008                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9009                 memcpy(&wc->update_progress, &key,
9010                        sizeof(wc->update_progress));
9011
9012                 level = root_item->drop_level;
9013                 BUG_ON(level == 0);
9014                 path->lowest_level = level;
9015                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9016                 path->lowest_level = 0;
9017                 if (ret < 0) {
9018                         err = ret;
9019                         goto out_end_trans;
9020                 }
9021                 WARN_ON(ret > 0);
9022
9023                 /*
9024                  * unlock our path, this is safe because only this
9025                  * function is allowed to delete this snapshot
9026                  */
9027                 btrfs_unlock_up_safe(path, 0);
9028
9029                 level = btrfs_header_level(root->node);
9030                 while (1) {
9031                         btrfs_tree_lock(path->nodes[level]);
9032                         btrfs_set_lock_blocking(path->nodes[level]);
9033                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9034
9035                         ret = btrfs_lookup_extent_info(trans, root,
9036                                                 path->nodes[level]->start,
9037                                                 level, 1, &wc->refs[level],
9038                                                 &wc->flags[level]);
9039                         if (ret < 0) {
9040                                 err = ret;
9041                                 goto out_end_trans;
9042                         }
9043                         BUG_ON(wc->refs[level] == 0);
9044
9045                         if (level == root_item->drop_level)
9046                                 break;
9047
9048                         btrfs_tree_unlock(path->nodes[level]);
9049                         path->locks[level] = 0;
9050                         WARN_ON(wc->refs[level] != 1);
9051                         level--;
9052                 }
9053         }
9054
9055         wc->level = level;
9056         wc->shared_level = -1;
9057         wc->stage = DROP_REFERENCE;
9058         wc->update_ref = update_ref;
9059         wc->keep_locks = 0;
9060         wc->for_reloc = for_reloc;
9061         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9062
9063         while (1) {
9064
9065                 ret = walk_down_tree(trans, root, path, wc);
9066                 if (ret < 0) {
9067                         err = ret;
9068                         break;
9069                 }
9070
9071                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9072                 if (ret < 0) {
9073                         err = ret;
9074                         break;
9075                 }
9076
9077                 if (ret > 0) {
9078                         BUG_ON(wc->stage != DROP_REFERENCE);
9079                         break;
9080                 }
9081
9082                 if (wc->stage == DROP_REFERENCE) {
9083                         level = wc->level;
9084                         btrfs_node_key(path->nodes[level],
9085                                        &root_item->drop_progress,
9086                                        path->slots[level]);
9087                         root_item->drop_level = level;
9088                 }
9089
9090                 BUG_ON(wc->level == 0);
9091                 if (btrfs_should_end_transaction(trans, tree_root) ||
9092                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
9093                         ret = btrfs_update_root(trans, tree_root,
9094                                                 &root->root_key,
9095                                                 root_item);
9096                         if (ret) {
9097                                 btrfs_abort_transaction(trans, tree_root, ret);
9098                                 err = ret;
9099                                 goto out_end_trans;
9100                         }
9101
9102                         btrfs_end_transaction_throttle(trans, tree_root);
9103                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
9104                                 pr_debug("BTRFS: drop snapshot early exit\n");
9105                                 err = -EAGAIN;
9106                                 goto out_free;
9107                         }
9108
9109                         trans = btrfs_start_transaction(tree_root, 0);
9110                         if (IS_ERR(trans)) {
9111                                 err = PTR_ERR(trans);
9112                                 goto out_free;
9113                         }
9114                         if (block_rsv)
9115                                 trans->block_rsv = block_rsv;
9116                 }
9117         }
9118         btrfs_release_path(path);
9119         if (err)
9120                 goto out_end_trans;
9121
9122         ret = btrfs_del_root(trans, tree_root, &root->root_key);
9123         if (ret) {
9124                 btrfs_abort_transaction(trans, tree_root, ret);
9125                 goto out_end_trans;
9126         }
9127
9128         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9129                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9130                                       NULL, NULL);
9131                 if (ret < 0) {
9132                         btrfs_abort_transaction(trans, tree_root, ret);
9133                         err = ret;
9134                         goto out_end_trans;
9135                 } else if (ret > 0) {
9136                         /* if we fail to delete the orphan item this time
9137                          * around, it'll get picked up the next time.
9138                          *
9139                          * The most common failure here is just -ENOENT.
9140                          */
9141                         btrfs_del_orphan_item(trans, tree_root,
9142                                               root->root_key.objectid);
9143                 }
9144         }
9145
9146         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9147                 btrfs_add_dropped_root(trans, root);
9148         } else {
9149                 free_extent_buffer(root->node);
9150                 free_extent_buffer(root->commit_root);
9151                 btrfs_put_fs_root(root);
9152         }
9153         root_dropped = true;
9154 out_end_trans:
9155         btrfs_end_transaction_throttle(trans, tree_root);
9156 out_free:
9157         kfree(wc);
9158         btrfs_free_path(path);
9159 out:
9160         /*
9161          * So if we need to stop dropping the snapshot for whatever reason we
9162          * need to make sure to add it back to the dead root list so that we
9163          * keep trying to do the work later.  This also cleans up roots if we
9164          * don't have it in the radix (like when we recover after a power fail
9165          * or unmount) so we don't leak memory.
9166          */
9167         if (!for_reloc && root_dropped == false)
9168                 btrfs_add_dead_root(root);
9169         if (err && err != -EAGAIN)
9170                 btrfs_handle_fs_error(root->fs_info, err, NULL);
9171         return err;
9172 }
9173
9174 /*
9175  * drop subtree rooted at tree block 'node'.
9176  *
9177  * NOTE: this function will unlock and release tree block 'node'
9178  * only used by relocation code
9179  */
9180 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9181                         struct btrfs_root *root,
9182                         struct extent_buffer *node,
9183                         struct extent_buffer *parent)
9184 {
9185         struct btrfs_path *path;
9186         struct walk_control *wc;
9187         int level;
9188         int parent_level;
9189         int ret = 0;
9190         int wret;
9191
9192         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9193
9194         path = btrfs_alloc_path();
9195         if (!path)
9196                 return -ENOMEM;
9197
9198         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9199         if (!wc) {
9200                 btrfs_free_path(path);
9201                 return -ENOMEM;
9202         }
9203
9204         btrfs_assert_tree_locked(parent);
9205         parent_level = btrfs_header_level(parent);
9206         extent_buffer_get(parent);
9207         path->nodes[parent_level] = parent;
9208         path->slots[parent_level] = btrfs_header_nritems(parent);
9209
9210         btrfs_assert_tree_locked(node);
9211         level = btrfs_header_level(node);
9212         path->nodes[level] = node;
9213         path->slots[level] = 0;
9214         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9215
9216         wc->refs[parent_level] = 1;
9217         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9218         wc->level = level;
9219         wc->shared_level = -1;
9220         wc->stage = DROP_REFERENCE;
9221         wc->update_ref = 0;
9222         wc->keep_locks = 1;
9223         wc->for_reloc = 1;
9224         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9225
9226         while (1) {
9227                 wret = walk_down_tree(trans, root, path, wc);
9228                 if (wret < 0) {
9229                         ret = wret;
9230                         break;
9231                 }
9232
9233                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9234                 if (wret < 0)
9235                         ret = wret;
9236                 if (wret != 0)
9237                         break;
9238         }
9239
9240         kfree(wc);
9241         btrfs_free_path(path);
9242         return ret;
9243 }
9244
9245 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9246 {
9247         u64 num_devices;
9248         u64 stripped;
9249
9250         /*
9251          * if restripe for this chunk_type is on pick target profile and
9252          * return, otherwise do the usual balance
9253          */
9254         stripped = get_restripe_target(root->fs_info, flags);
9255         if (stripped)
9256                 return extended_to_chunk(stripped);
9257
9258         num_devices = root->fs_info->fs_devices->rw_devices;
9259
9260         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9261                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9262                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9263
9264         if (num_devices == 1) {
9265                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9266                 stripped = flags & ~stripped;
9267
9268                 /* turn raid0 into single device chunks */
9269                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9270                         return stripped;
9271
9272                 /* turn mirroring into duplication */
9273                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9274                              BTRFS_BLOCK_GROUP_RAID10))
9275                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9276         } else {
9277                 /* they already had raid on here, just return */
9278                 if (flags & stripped)
9279                         return flags;
9280
9281                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9282                 stripped = flags & ~stripped;
9283
9284                 /* switch duplicated blocks with raid1 */
9285                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9286                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9287
9288                 /* this is drive concat, leave it alone */
9289         }
9290
9291         return flags;
9292 }
9293
9294 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9295 {
9296         struct btrfs_space_info *sinfo = cache->space_info;
9297         u64 num_bytes;
9298         u64 min_allocable_bytes;
9299         int ret = -ENOSPC;
9300
9301         /*
9302          * We need some metadata space and system metadata space for
9303          * allocating chunks in some corner cases until we force to set
9304          * it to be readonly.
9305          */
9306         if ((sinfo->flags &
9307              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9308             !force)
9309                 min_allocable_bytes = SZ_1M;
9310         else
9311                 min_allocable_bytes = 0;
9312
9313         spin_lock(&sinfo->lock);
9314         spin_lock(&cache->lock);
9315
9316         if (cache->ro) {
9317                 cache->ro++;
9318                 ret = 0;
9319                 goto out;
9320         }
9321
9322         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9323                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9324
9325         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9326             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9327             min_allocable_bytes <= sinfo->total_bytes) {
9328                 sinfo->bytes_readonly += num_bytes;
9329                 cache->ro++;
9330                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9331                 ret = 0;
9332         }
9333 out:
9334         spin_unlock(&cache->lock);
9335         spin_unlock(&sinfo->lock);
9336         return ret;
9337 }
9338
9339 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9340                              struct btrfs_block_group_cache *cache)
9341
9342 {
9343         struct btrfs_trans_handle *trans;
9344         u64 alloc_flags;
9345         int ret;
9346
9347 again:
9348         trans = btrfs_join_transaction(root);
9349         if (IS_ERR(trans))
9350                 return PTR_ERR(trans);
9351
9352         /*
9353          * we're not allowed to set block groups readonly after the dirty
9354          * block groups cache has started writing.  If it already started,
9355          * back off and let this transaction commit
9356          */
9357         mutex_lock(&root->fs_info->ro_block_group_mutex);
9358         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9359                 u64 transid = trans->transid;
9360
9361                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9362                 btrfs_end_transaction(trans, root);
9363
9364                 ret = btrfs_wait_for_commit(root, transid);
9365                 if (ret)
9366                         return ret;
9367                 goto again;
9368         }
9369
9370         /*
9371          * if we are changing raid levels, try to allocate a corresponding
9372          * block group with the new raid level.
9373          */
9374         alloc_flags = update_block_group_flags(root, cache->flags);
9375         if (alloc_flags != cache->flags) {
9376                 ret = do_chunk_alloc(trans, root, alloc_flags,
9377                                      CHUNK_ALLOC_FORCE);
9378                 /*
9379                  * ENOSPC is allowed here, we may have enough space
9380                  * already allocated at the new raid level to
9381                  * carry on
9382                  */
9383                 if (ret == -ENOSPC)
9384                         ret = 0;
9385                 if (ret < 0)
9386                         goto out;
9387         }
9388
9389         ret = inc_block_group_ro(cache, 0);
9390         if (!ret)
9391                 goto out;
9392         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9393         ret = do_chunk_alloc(trans, root, alloc_flags,
9394                              CHUNK_ALLOC_FORCE);
9395         if (ret < 0)
9396                 goto out;
9397         ret = inc_block_group_ro(cache, 0);
9398 out:
9399         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9400                 alloc_flags = update_block_group_flags(root, cache->flags);
9401                 lock_chunks(root->fs_info->chunk_root);
9402                 check_system_chunk(trans, root, alloc_flags);
9403                 unlock_chunks(root->fs_info->chunk_root);
9404         }
9405         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9406
9407         btrfs_end_transaction(trans, root);
9408         return ret;
9409 }
9410
9411 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9412                             struct btrfs_root *root, u64 type)
9413 {
9414         u64 alloc_flags = get_alloc_profile(root, type);
9415         return do_chunk_alloc(trans, root, alloc_flags,
9416                               CHUNK_ALLOC_FORCE);
9417 }
9418
9419 /*
9420  * helper to account the unused space of all the readonly block group in the
9421  * space_info. takes mirrors into account.
9422  */
9423 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9424 {
9425         struct btrfs_block_group_cache *block_group;
9426         u64 free_bytes = 0;
9427         int factor;
9428
9429         /* It's df, we don't care if it's racy */
9430         if (list_empty(&sinfo->ro_bgs))
9431                 return 0;
9432
9433         spin_lock(&sinfo->lock);
9434         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9435                 spin_lock(&block_group->lock);
9436
9437                 if (!block_group->ro) {
9438                         spin_unlock(&block_group->lock);
9439                         continue;
9440                 }
9441
9442                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9443                                           BTRFS_BLOCK_GROUP_RAID10 |
9444                                           BTRFS_BLOCK_GROUP_DUP))
9445                         factor = 2;
9446                 else
9447                         factor = 1;
9448
9449                 free_bytes += (block_group->key.offset -
9450                                btrfs_block_group_used(&block_group->item)) *
9451                                factor;
9452
9453                 spin_unlock(&block_group->lock);
9454         }
9455         spin_unlock(&sinfo->lock);
9456
9457         return free_bytes;
9458 }
9459
9460 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9461                               struct btrfs_block_group_cache *cache)
9462 {
9463         struct btrfs_space_info *sinfo = cache->space_info;
9464         u64 num_bytes;
9465
9466         BUG_ON(!cache->ro);
9467
9468         spin_lock(&sinfo->lock);
9469         spin_lock(&cache->lock);
9470         if (!--cache->ro) {
9471                 num_bytes = cache->key.offset - cache->reserved -
9472                             cache->pinned - cache->bytes_super -
9473                             btrfs_block_group_used(&cache->item);
9474                 sinfo->bytes_readonly -= num_bytes;
9475                 list_del_init(&cache->ro_list);
9476         }
9477         spin_unlock(&cache->lock);
9478         spin_unlock(&sinfo->lock);
9479 }
9480
9481 /*
9482  * checks to see if its even possible to relocate this block group.
9483  *
9484  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
9485  * ok to go ahead and try.
9486  */
9487 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9488 {
9489         struct btrfs_block_group_cache *block_group;
9490         struct btrfs_space_info *space_info;
9491         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9492         struct btrfs_device *device;
9493         struct btrfs_trans_handle *trans;
9494         u64 min_free;
9495         u64 dev_min = 1;
9496         u64 dev_nr = 0;
9497         u64 target;
9498         int debug;
9499         int index;
9500         int full = 0;
9501         int ret = 0;
9502
9503         debug = btrfs_test_opt(root, ENOSPC_DEBUG);
9504
9505         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9506
9507         /* odd, couldn't find the block group, leave it alone */
9508         if (!block_group) {
9509                 if (debug)
9510                         btrfs_warn(root->fs_info,
9511                                    "can't find block group for bytenr %llu",
9512                                    bytenr);
9513                 return -1;
9514         }
9515
9516         min_free = btrfs_block_group_used(&block_group->item);
9517
9518         /* no bytes used, we're good */
9519         if (!min_free)
9520                 goto out;
9521
9522         space_info = block_group->space_info;
9523         spin_lock(&space_info->lock);
9524
9525         full = space_info->full;
9526
9527         /*
9528          * if this is the last block group we have in this space, we can't
9529          * relocate it unless we're able to allocate a new chunk below.
9530          *
9531          * Otherwise, we need to make sure we have room in the space to handle
9532          * all of the extents from this block group.  If we can, we're good
9533          */
9534         if ((space_info->total_bytes != block_group->key.offset) &&
9535             (space_info->bytes_used + space_info->bytes_reserved +
9536              space_info->bytes_pinned + space_info->bytes_readonly +
9537              min_free < space_info->total_bytes)) {
9538                 spin_unlock(&space_info->lock);
9539                 goto out;
9540         }
9541         spin_unlock(&space_info->lock);
9542
9543         /*
9544          * ok we don't have enough space, but maybe we have free space on our
9545          * devices to allocate new chunks for relocation, so loop through our
9546          * alloc devices and guess if we have enough space.  if this block
9547          * group is going to be restriped, run checks against the target
9548          * profile instead of the current one.
9549          */
9550         ret = -1;
9551
9552         /*
9553          * index:
9554          *      0: raid10
9555          *      1: raid1
9556          *      2: dup
9557          *      3: raid0
9558          *      4: single
9559          */
9560         target = get_restripe_target(root->fs_info, block_group->flags);
9561         if (target) {
9562                 index = __get_raid_index(extended_to_chunk(target));
9563         } else {
9564                 /*
9565                  * this is just a balance, so if we were marked as full
9566                  * we know there is no space for a new chunk
9567                  */
9568                 if (full) {
9569                         if (debug)
9570                                 btrfs_warn(root->fs_info,
9571                                         "no space to alloc new chunk for block group %llu",
9572                                         block_group->key.objectid);
9573                         goto out;
9574                 }
9575
9576                 index = get_block_group_index(block_group);
9577         }
9578
9579         if (index == BTRFS_RAID_RAID10) {
9580                 dev_min = 4;
9581                 /* Divide by 2 */
9582                 min_free >>= 1;
9583         } else if (index == BTRFS_RAID_RAID1) {
9584                 dev_min = 2;
9585         } else if (index == BTRFS_RAID_DUP) {
9586                 /* Multiply by 2 */
9587                 min_free <<= 1;
9588         } else if (index == BTRFS_RAID_RAID0) {
9589                 dev_min = fs_devices->rw_devices;
9590                 min_free = div64_u64(min_free, dev_min);
9591         }
9592
9593         /* We need to do this so that we can look at pending chunks */
9594         trans = btrfs_join_transaction(root);
9595         if (IS_ERR(trans)) {
9596                 ret = PTR_ERR(trans);
9597                 goto out;
9598         }
9599
9600         mutex_lock(&root->fs_info->chunk_mutex);
9601         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9602                 u64 dev_offset;
9603
9604                 /*
9605                  * check to make sure we can actually find a chunk with enough
9606                  * space to fit our block group in.
9607                  */
9608                 if (device->total_bytes > device->bytes_used + min_free &&
9609                     !device->is_tgtdev_for_dev_replace) {
9610                         ret = find_free_dev_extent(trans, device, min_free,
9611                                                    &dev_offset, NULL);
9612                         if (!ret)
9613                                 dev_nr++;
9614
9615                         if (dev_nr >= dev_min)
9616                                 break;
9617
9618                         ret = -1;
9619                 }
9620         }
9621         if (debug && ret == -1)
9622                 btrfs_warn(root->fs_info,
9623                         "no space to allocate a new chunk for block group %llu",
9624                         block_group->key.objectid);
9625         mutex_unlock(&root->fs_info->chunk_mutex);
9626         btrfs_end_transaction(trans, root);
9627 out:
9628         btrfs_put_block_group(block_group);
9629         return ret;
9630 }
9631
9632 static int find_first_block_group(struct btrfs_root *root,
9633                 struct btrfs_path *path, struct btrfs_key *key)
9634 {
9635         int ret = 0;
9636         struct btrfs_key found_key;
9637         struct extent_buffer *leaf;
9638         int slot;
9639
9640         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9641         if (ret < 0)
9642                 goto out;
9643
9644         while (1) {
9645                 slot = path->slots[0];
9646                 leaf = path->nodes[0];
9647                 if (slot >= btrfs_header_nritems(leaf)) {
9648                         ret = btrfs_next_leaf(root, path);
9649                         if (ret == 0)
9650                                 continue;
9651                         if (ret < 0)
9652                                 goto out;
9653                         break;
9654                 }
9655                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9656
9657                 if (found_key.objectid >= key->objectid &&
9658                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9659                         ret = 0;
9660                         goto out;
9661                 }
9662                 path->slots[0]++;
9663         }
9664 out:
9665         return ret;
9666 }
9667
9668 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9669 {
9670         struct btrfs_block_group_cache *block_group;
9671         u64 last = 0;
9672
9673         while (1) {
9674                 struct inode *inode;
9675
9676                 block_group = btrfs_lookup_first_block_group(info, last);
9677                 while (block_group) {
9678                         spin_lock(&block_group->lock);
9679                         if (block_group->iref)
9680                                 break;
9681                         spin_unlock(&block_group->lock);
9682                         block_group = next_block_group(info->tree_root,
9683                                                        block_group);
9684                 }
9685                 if (!block_group) {
9686                         if (last == 0)
9687                                 break;
9688                         last = 0;
9689                         continue;
9690                 }
9691
9692                 inode = block_group->inode;
9693                 block_group->iref = 0;
9694                 block_group->inode = NULL;
9695                 spin_unlock(&block_group->lock);
9696                 iput(inode);
9697                 last = block_group->key.objectid + block_group->key.offset;
9698                 btrfs_put_block_group(block_group);
9699         }
9700 }
9701
9702 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9703 {
9704         struct btrfs_block_group_cache *block_group;
9705         struct btrfs_space_info *space_info;
9706         struct btrfs_caching_control *caching_ctl;
9707         struct rb_node *n;
9708
9709         down_write(&info->commit_root_sem);
9710         while (!list_empty(&info->caching_block_groups)) {
9711                 caching_ctl = list_entry(info->caching_block_groups.next,
9712                                          struct btrfs_caching_control, list);
9713                 list_del(&caching_ctl->list);
9714                 put_caching_control(caching_ctl);
9715         }
9716         up_write(&info->commit_root_sem);
9717
9718         spin_lock(&info->unused_bgs_lock);
9719         while (!list_empty(&info->unused_bgs)) {
9720                 block_group = list_first_entry(&info->unused_bgs,
9721                                                struct btrfs_block_group_cache,
9722                                                bg_list);
9723                 list_del_init(&block_group->bg_list);
9724                 btrfs_put_block_group(block_group);
9725         }
9726         spin_unlock(&info->unused_bgs_lock);
9727
9728         spin_lock(&info->block_group_cache_lock);
9729         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9730                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9731                                        cache_node);
9732                 rb_erase(&block_group->cache_node,
9733                          &info->block_group_cache_tree);
9734                 RB_CLEAR_NODE(&block_group->cache_node);
9735                 spin_unlock(&info->block_group_cache_lock);
9736
9737                 down_write(&block_group->space_info->groups_sem);
9738                 list_del(&block_group->list);
9739                 up_write(&block_group->space_info->groups_sem);
9740
9741                 if (block_group->cached == BTRFS_CACHE_STARTED)
9742                         wait_block_group_cache_done(block_group);
9743
9744                 /*
9745                  * We haven't cached this block group, which means we could
9746                  * possibly have excluded extents on this block group.
9747                  */
9748                 if (block_group->cached == BTRFS_CACHE_NO ||
9749                     block_group->cached == BTRFS_CACHE_ERROR)
9750                         free_excluded_extents(info->extent_root, block_group);
9751
9752                 btrfs_remove_free_space_cache(block_group);
9753                 btrfs_put_block_group(block_group);
9754
9755                 spin_lock(&info->block_group_cache_lock);
9756         }
9757         spin_unlock(&info->block_group_cache_lock);
9758
9759         /* now that all the block groups are freed, go through and
9760          * free all the space_info structs.  This is only called during
9761          * the final stages of unmount, and so we know nobody is
9762          * using them.  We call synchronize_rcu() once before we start,
9763          * just to be on the safe side.
9764          */
9765         synchronize_rcu();
9766
9767         release_global_block_rsv(info);
9768
9769         while (!list_empty(&info->space_info)) {
9770                 int i;
9771
9772                 space_info = list_entry(info->space_info.next,
9773                                         struct btrfs_space_info,
9774                                         list);
9775                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9776                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9777                             space_info->bytes_reserved > 0 ||
9778                             space_info->bytes_may_use > 0)) {
9779                                 dump_space_info(space_info, 0, 0);
9780                         }
9781                 }
9782                 list_del(&space_info->list);
9783                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9784                         struct kobject *kobj;
9785                         kobj = space_info->block_group_kobjs[i];
9786                         space_info->block_group_kobjs[i] = NULL;
9787                         if (kobj) {
9788                                 kobject_del(kobj);
9789                                 kobject_put(kobj);
9790                         }
9791                 }
9792                 kobject_del(&space_info->kobj);
9793                 kobject_put(&space_info->kobj);
9794         }
9795         return 0;
9796 }
9797
9798 static void __link_block_group(struct btrfs_space_info *space_info,
9799                                struct btrfs_block_group_cache *cache)
9800 {
9801         int index = get_block_group_index(cache);
9802         bool first = false;
9803
9804         down_write(&space_info->groups_sem);
9805         if (list_empty(&space_info->block_groups[index]))
9806                 first = true;
9807         list_add_tail(&cache->list, &space_info->block_groups[index]);
9808         up_write(&space_info->groups_sem);
9809
9810         if (first) {
9811                 struct raid_kobject *rkobj;
9812                 int ret;
9813
9814                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9815                 if (!rkobj)
9816                         goto out_err;
9817                 rkobj->raid_type = index;
9818                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9819                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9820                                   "%s", get_raid_name(index));
9821                 if (ret) {
9822                         kobject_put(&rkobj->kobj);
9823                         goto out_err;
9824                 }
9825                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9826         }
9827
9828         return;
9829 out_err:
9830         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9831 }
9832
9833 static struct btrfs_block_group_cache *
9834 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9835 {
9836         struct btrfs_block_group_cache *cache;
9837
9838         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9839         if (!cache)
9840                 return NULL;
9841
9842         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9843                                         GFP_NOFS);
9844         if (!cache->free_space_ctl) {
9845                 kfree(cache);
9846                 return NULL;
9847         }
9848
9849         cache->key.objectid = start;
9850         cache->key.offset = size;
9851         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9852
9853         cache->sectorsize = root->sectorsize;
9854         cache->fs_info = root->fs_info;
9855         cache->full_stripe_len = btrfs_full_stripe_len(root,
9856                                                &root->fs_info->mapping_tree,
9857                                                start);
9858         set_free_space_tree_thresholds(cache);
9859
9860         atomic_set(&cache->count, 1);
9861         spin_lock_init(&cache->lock);
9862         init_rwsem(&cache->data_rwsem);
9863         INIT_LIST_HEAD(&cache->list);
9864         INIT_LIST_HEAD(&cache->cluster_list);
9865         INIT_LIST_HEAD(&cache->bg_list);
9866         INIT_LIST_HEAD(&cache->ro_list);
9867         INIT_LIST_HEAD(&cache->dirty_list);
9868         INIT_LIST_HEAD(&cache->io_list);
9869         btrfs_init_free_space_ctl(cache);
9870         atomic_set(&cache->trimming, 0);
9871         mutex_init(&cache->free_space_lock);
9872
9873         return cache;
9874 }
9875
9876 int btrfs_read_block_groups(struct btrfs_root *root)
9877 {
9878         struct btrfs_path *path;
9879         int ret;
9880         struct btrfs_block_group_cache *cache;
9881         struct btrfs_fs_info *info = root->fs_info;
9882         struct btrfs_space_info *space_info;
9883         struct btrfs_key key;
9884         struct btrfs_key found_key;
9885         struct extent_buffer *leaf;
9886         int need_clear = 0;
9887         u64 cache_gen;
9888
9889         root = info->extent_root;
9890         key.objectid = 0;
9891         key.offset = 0;
9892         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9893         path = btrfs_alloc_path();
9894         if (!path)
9895                 return -ENOMEM;
9896         path->reada = READA_FORWARD;
9897
9898         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9899         if (btrfs_test_opt(root, SPACE_CACHE) &&
9900             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9901                 need_clear = 1;
9902         if (btrfs_test_opt(root, CLEAR_CACHE))
9903                 need_clear = 1;
9904
9905         while (1) {
9906                 ret = find_first_block_group(root, path, &key);
9907                 if (ret > 0)
9908                         break;
9909                 if (ret != 0)
9910                         goto error;
9911
9912                 leaf = path->nodes[0];
9913                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9914
9915                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9916                                                        found_key.offset);
9917                 if (!cache) {
9918                         ret = -ENOMEM;
9919                         goto error;
9920                 }
9921
9922                 if (need_clear) {
9923                         /*
9924                          * When we mount with old space cache, we need to
9925                          * set BTRFS_DC_CLEAR and set dirty flag.
9926                          *
9927                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9928                          *    truncate the old free space cache inode and
9929                          *    setup a new one.
9930                          * b) Setting 'dirty flag' makes sure that we flush
9931                          *    the new space cache info onto disk.
9932                          */
9933                         if (btrfs_test_opt(root, SPACE_CACHE))
9934                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9935                 }
9936
9937                 read_extent_buffer(leaf, &cache->item,
9938                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9939                                    sizeof(cache->item));
9940                 cache->flags = btrfs_block_group_flags(&cache->item);
9941
9942                 key.objectid = found_key.objectid + found_key.offset;
9943                 btrfs_release_path(path);
9944
9945                 /*
9946                  * We need to exclude the super stripes now so that the space
9947                  * info has super bytes accounted for, otherwise we'll think
9948                  * we have more space than we actually do.
9949                  */
9950                 ret = exclude_super_stripes(root, cache);
9951                 if (ret) {
9952                         /*
9953                          * We may have excluded something, so call this just in
9954                          * case.
9955                          */
9956                         free_excluded_extents(root, cache);
9957                         btrfs_put_block_group(cache);
9958                         goto error;
9959                 }
9960
9961                 /*
9962                  * check for two cases, either we are full, and therefore
9963                  * don't need to bother with the caching work since we won't
9964                  * find any space, or we are empty, and we can just add all
9965                  * the space in and be done with it.  This saves us _alot_ of
9966                  * time, particularly in the full case.
9967                  */
9968                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9969                         cache->last_byte_to_unpin = (u64)-1;
9970                         cache->cached = BTRFS_CACHE_FINISHED;
9971                         free_excluded_extents(root, cache);
9972                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9973                         cache->last_byte_to_unpin = (u64)-1;
9974                         cache->cached = BTRFS_CACHE_FINISHED;
9975                         add_new_free_space(cache, root->fs_info,
9976                                            found_key.objectid,
9977                                            found_key.objectid +
9978                                            found_key.offset);
9979                         free_excluded_extents(root, cache);
9980                 }
9981
9982                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9983                 if (ret) {
9984                         btrfs_remove_free_space_cache(cache);
9985                         btrfs_put_block_group(cache);
9986                         goto error;
9987                 }
9988
9989                 ret = update_space_info(info, cache->flags, found_key.offset,
9990                                         btrfs_block_group_used(&cache->item),
9991                                         &space_info);
9992                 if (ret) {
9993                         btrfs_remove_free_space_cache(cache);
9994                         spin_lock(&info->block_group_cache_lock);
9995                         rb_erase(&cache->cache_node,
9996                                  &info->block_group_cache_tree);
9997                         RB_CLEAR_NODE(&cache->cache_node);
9998                         spin_unlock(&info->block_group_cache_lock);
9999                         btrfs_put_block_group(cache);
10000                         goto error;
10001                 }
10002
10003                 cache->space_info = space_info;
10004                 spin_lock(&cache->space_info->lock);
10005                 cache->space_info->bytes_readonly += cache->bytes_super;
10006                 spin_unlock(&cache->space_info->lock);
10007
10008                 __link_block_group(space_info, cache);
10009
10010                 set_avail_alloc_bits(root->fs_info, cache->flags);
10011                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
10012                         inc_block_group_ro(cache, 1);
10013                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10014                         spin_lock(&info->unused_bgs_lock);
10015                         /* Should always be true but just in case. */
10016                         if (list_empty(&cache->bg_list)) {
10017                                 btrfs_get_block_group(cache);
10018                                 list_add_tail(&cache->bg_list,
10019                                               &info->unused_bgs);
10020                         }
10021                         spin_unlock(&info->unused_bgs_lock);
10022                 }
10023         }
10024
10025         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
10026                 if (!(get_alloc_profile(root, space_info->flags) &
10027                       (BTRFS_BLOCK_GROUP_RAID10 |
10028                        BTRFS_BLOCK_GROUP_RAID1 |
10029                        BTRFS_BLOCK_GROUP_RAID5 |
10030                        BTRFS_BLOCK_GROUP_RAID6 |
10031                        BTRFS_BLOCK_GROUP_DUP)))
10032                         continue;
10033                 /*
10034                  * avoid allocating from un-mirrored block group if there are
10035                  * mirrored block groups.
10036                  */
10037                 list_for_each_entry(cache,
10038                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10039                                 list)
10040                         inc_block_group_ro(cache, 1);
10041                 list_for_each_entry(cache,
10042                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10043                                 list)
10044                         inc_block_group_ro(cache, 1);
10045         }
10046
10047         init_global_block_rsv(info);
10048         ret = 0;
10049 error:
10050         btrfs_free_path(path);
10051         return ret;
10052 }
10053
10054 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
10055                                        struct btrfs_root *root)
10056 {
10057         struct btrfs_block_group_cache *block_group, *tmp;
10058         struct btrfs_root *extent_root = root->fs_info->extent_root;
10059         struct btrfs_block_group_item item;
10060         struct btrfs_key key;
10061         int ret = 0;
10062         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10063
10064         trans->can_flush_pending_bgs = false;
10065         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
10066                 if (ret)
10067                         goto next;
10068
10069                 spin_lock(&block_group->lock);
10070                 memcpy(&item, &block_group->item, sizeof(item));
10071                 memcpy(&key, &block_group->key, sizeof(key));
10072                 spin_unlock(&block_group->lock);
10073
10074                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10075                                         sizeof(item));
10076                 if (ret)
10077                         btrfs_abort_transaction(trans, extent_root, ret);
10078                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
10079                                                key.objectid, key.offset);
10080                 if (ret)
10081                         btrfs_abort_transaction(trans, extent_root, ret);
10082                 add_block_group_free_space(trans, root->fs_info, block_group);
10083                 /* already aborted the transaction if it failed. */
10084 next:
10085                 list_del_init(&block_group->bg_list);
10086         }
10087         trans->can_flush_pending_bgs = can_flush_pending_bgs;
10088 }
10089
10090 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10091                            struct btrfs_root *root, u64 bytes_used,
10092                            u64 type, u64 chunk_objectid, u64 chunk_offset,
10093                            u64 size)
10094 {
10095         int ret;
10096         struct btrfs_root *extent_root;
10097         struct btrfs_block_group_cache *cache;
10098
10099         extent_root = root->fs_info->extent_root;
10100
10101         btrfs_set_log_full_commit(root->fs_info, trans);
10102
10103         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
10104         if (!cache)
10105                 return -ENOMEM;
10106
10107         btrfs_set_block_group_used(&cache->item, bytes_used);
10108         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
10109         btrfs_set_block_group_flags(&cache->item, type);
10110
10111         cache->flags = type;
10112         cache->last_byte_to_unpin = (u64)-1;
10113         cache->cached = BTRFS_CACHE_FINISHED;
10114         cache->needs_free_space = 1;
10115         ret = exclude_super_stripes(root, cache);
10116         if (ret) {
10117                 /*
10118                  * We may have excluded something, so call this just in
10119                  * case.
10120                  */
10121                 free_excluded_extents(root, cache);
10122                 btrfs_put_block_group(cache);
10123                 return ret;
10124         }
10125
10126         add_new_free_space(cache, root->fs_info, chunk_offset,
10127                            chunk_offset + size);
10128
10129         free_excluded_extents(root, cache);
10130
10131 #ifdef CONFIG_BTRFS_DEBUG
10132         if (btrfs_should_fragment_free_space(root, cache)) {
10133                 u64 new_bytes_used = size - bytes_used;
10134
10135                 bytes_used += new_bytes_used >> 1;
10136                 fragment_free_space(root, cache);
10137         }
10138 #endif
10139         /*
10140          * Call to ensure the corresponding space_info object is created and
10141          * assigned to our block group, but don't update its counters just yet.
10142          * We want our bg to be added to the rbtree with its ->space_info set.
10143          */
10144         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
10145                                 &cache->space_info);
10146         if (ret) {
10147                 btrfs_remove_free_space_cache(cache);
10148                 btrfs_put_block_group(cache);
10149                 return ret;
10150         }
10151
10152         ret = btrfs_add_block_group_cache(root->fs_info, cache);
10153         if (ret) {
10154                 btrfs_remove_free_space_cache(cache);
10155                 btrfs_put_block_group(cache);
10156                 return ret;
10157         }
10158
10159         /*
10160          * Now that our block group has its ->space_info set and is inserted in
10161          * the rbtree, update the space info's counters.
10162          */
10163         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
10164                                 &cache->space_info);
10165         if (ret) {
10166                 btrfs_remove_free_space_cache(cache);
10167                 spin_lock(&root->fs_info->block_group_cache_lock);
10168                 rb_erase(&cache->cache_node,
10169                          &root->fs_info->block_group_cache_tree);
10170                 RB_CLEAR_NODE(&cache->cache_node);
10171                 spin_unlock(&root->fs_info->block_group_cache_lock);
10172                 btrfs_put_block_group(cache);
10173                 return ret;
10174         }
10175         update_global_block_rsv(root->fs_info);
10176
10177         spin_lock(&cache->space_info->lock);
10178         cache->space_info->bytes_readonly += cache->bytes_super;
10179         spin_unlock(&cache->space_info->lock);
10180
10181         __link_block_group(cache->space_info, cache);
10182
10183         list_add_tail(&cache->bg_list, &trans->new_bgs);
10184
10185         set_avail_alloc_bits(extent_root->fs_info, type);
10186
10187         return 0;
10188 }
10189
10190 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10191 {
10192         u64 extra_flags = chunk_to_extended(flags) &
10193                                 BTRFS_EXTENDED_PROFILE_MASK;
10194
10195         write_seqlock(&fs_info->profiles_lock);
10196         if (flags & BTRFS_BLOCK_GROUP_DATA)
10197                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10198         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10199                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10200         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10201                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10202         write_sequnlock(&fs_info->profiles_lock);
10203 }
10204
10205 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10206                              struct btrfs_root *root, u64 group_start,
10207                              struct extent_map *em)
10208 {
10209         struct btrfs_path *path;
10210         struct btrfs_block_group_cache *block_group;
10211         struct btrfs_free_cluster *cluster;
10212         struct btrfs_root *tree_root = root->fs_info->tree_root;
10213         struct btrfs_key key;
10214         struct inode *inode;
10215         struct kobject *kobj = NULL;
10216         int ret;
10217         int index;
10218         int factor;
10219         struct btrfs_caching_control *caching_ctl = NULL;
10220         bool remove_em;
10221
10222         root = root->fs_info->extent_root;
10223
10224         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10225         BUG_ON(!block_group);
10226         BUG_ON(!block_group->ro);
10227
10228         /*
10229          * Free the reserved super bytes from this block group before
10230          * remove it.
10231          */
10232         free_excluded_extents(root, block_group);
10233
10234         memcpy(&key, &block_group->key, sizeof(key));
10235         index = get_block_group_index(block_group);
10236         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10237                                   BTRFS_BLOCK_GROUP_RAID1 |
10238                                   BTRFS_BLOCK_GROUP_RAID10))
10239                 factor = 2;
10240         else
10241                 factor = 1;
10242
10243         /* make sure this block group isn't part of an allocation cluster */
10244         cluster = &root->fs_info->data_alloc_cluster;
10245         spin_lock(&cluster->refill_lock);
10246         btrfs_return_cluster_to_free_space(block_group, cluster);
10247         spin_unlock(&cluster->refill_lock);
10248
10249         /*
10250          * make sure this block group isn't part of a metadata
10251          * allocation cluster
10252          */
10253         cluster = &root->fs_info->meta_alloc_cluster;
10254         spin_lock(&cluster->refill_lock);
10255         btrfs_return_cluster_to_free_space(block_group, cluster);
10256         spin_unlock(&cluster->refill_lock);
10257
10258         path = btrfs_alloc_path();
10259         if (!path) {
10260                 ret = -ENOMEM;
10261                 goto out;
10262         }
10263
10264         /*
10265          * get the inode first so any iput calls done for the io_list
10266          * aren't the final iput (no unlinks allowed now)
10267          */
10268         inode = lookup_free_space_inode(tree_root, block_group, path);
10269
10270         mutex_lock(&trans->transaction->cache_write_mutex);
10271         /*
10272          * make sure our free spache cache IO is done before remove the
10273          * free space inode
10274          */
10275         spin_lock(&trans->transaction->dirty_bgs_lock);
10276         if (!list_empty(&block_group->io_list)) {
10277                 list_del_init(&block_group->io_list);
10278
10279                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10280
10281                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10282                 btrfs_wait_cache_io(root, trans, block_group,
10283                                     &block_group->io_ctl, path,
10284                                     block_group->key.objectid);
10285                 btrfs_put_block_group(block_group);
10286                 spin_lock(&trans->transaction->dirty_bgs_lock);
10287         }
10288
10289         if (!list_empty(&block_group->dirty_list)) {
10290                 list_del_init(&block_group->dirty_list);
10291                 btrfs_put_block_group(block_group);
10292         }
10293         spin_unlock(&trans->transaction->dirty_bgs_lock);
10294         mutex_unlock(&trans->transaction->cache_write_mutex);
10295
10296         if (!IS_ERR(inode)) {
10297                 ret = btrfs_orphan_add(trans, inode);
10298                 if (ret) {
10299                         btrfs_add_delayed_iput(inode);
10300                         goto out;
10301                 }
10302                 clear_nlink(inode);
10303                 /* One for the block groups ref */
10304                 spin_lock(&block_group->lock);
10305                 if (block_group->iref) {
10306                         block_group->iref = 0;
10307                         block_group->inode = NULL;
10308                         spin_unlock(&block_group->lock);
10309                         iput(inode);
10310                 } else {
10311                         spin_unlock(&block_group->lock);
10312                 }
10313                 /* One for our lookup ref */
10314                 btrfs_add_delayed_iput(inode);
10315         }
10316
10317         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10318         key.offset = block_group->key.objectid;
10319         key.type = 0;
10320
10321         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10322         if (ret < 0)
10323                 goto out;
10324         if (ret > 0)
10325                 btrfs_release_path(path);
10326         if (ret == 0) {
10327                 ret = btrfs_del_item(trans, tree_root, path);
10328                 if (ret)
10329                         goto out;
10330                 btrfs_release_path(path);
10331         }
10332
10333         spin_lock(&root->fs_info->block_group_cache_lock);
10334         rb_erase(&block_group->cache_node,
10335                  &root->fs_info->block_group_cache_tree);
10336         RB_CLEAR_NODE(&block_group->cache_node);
10337
10338         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10339                 root->fs_info->first_logical_byte = (u64)-1;
10340         spin_unlock(&root->fs_info->block_group_cache_lock);
10341
10342         down_write(&block_group->space_info->groups_sem);
10343         /*
10344          * we must use list_del_init so people can check to see if they
10345          * are still on the list after taking the semaphore
10346          */
10347         list_del_init(&block_group->list);
10348         if (list_empty(&block_group->space_info->block_groups[index])) {
10349                 kobj = block_group->space_info->block_group_kobjs[index];
10350                 block_group->space_info->block_group_kobjs[index] = NULL;
10351                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10352         }
10353         up_write(&block_group->space_info->groups_sem);
10354         if (kobj) {
10355                 kobject_del(kobj);
10356                 kobject_put(kobj);
10357         }
10358
10359         if (block_group->has_caching_ctl)
10360                 caching_ctl = get_caching_control(block_group);
10361         if (block_group->cached == BTRFS_CACHE_STARTED)
10362                 wait_block_group_cache_done(block_group);
10363         if (block_group->has_caching_ctl) {
10364                 down_write(&root->fs_info->commit_root_sem);
10365                 if (!caching_ctl) {
10366                         struct btrfs_caching_control *ctl;
10367
10368                         list_for_each_entry(ctl,
10369                                     &root->fs_info->caching_block_groups, list)
10370                                 if (ctl->block_group == block_group) {
10371                                         caching_ctl = ctl;
10372                                         atomic_inc(&caching_ctl->count);
10373                                         break;
10374                                 }
10375                 }
10376                 if (caching_ctl)
10377                         list_del_init(&caching_ctl->list);
10378                 up_write(&root->fs_info->commit_root_sem);
10379                 if (caching_ctl) {
10380                         /* Once for the caching bgs list and once for us. */
10381                         put_caching_control(caching_ctl);
10382                         put_caching_control(caching_ctl);
10383                 }
10384         }
10385
10386         spin_lock(&trans->transaction->dirty_bgs_lock);
10387         if (!list_empty(&block_group->dirty_list)) {
10388                 WARN_ON(1);
10389         }
10390         if (!list_empty(&block_group->io_list)) {
10391                 WARN_ON(1);
10392         }
10393         spin_unlock(&trans->transaction->dirty_bgs_lock);
10394         btrfs_remove_free_space_cache(block_group);
10395
10396         spin_lock(&block_group->space_info->lock);
10397         list_del_init(&block_group->ro_list);
10398
10399         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10400                 WARN_ON(block_group->space_info->total_bytes
10401                         < block_group->key.offset);
10402                 WARN_ON(block_group->space_info->bytes_readonly
10403                         < block_group->key.offset);
10404                 WARN_ON(block_group->space_info->disk_total
10405                         < block_group->key.offset * factor);
10406         }
10407         block_group->space_info->total_bytes -= block_group->key.offset;
10408         block_group->space_info->bytes_readonly -= block_group->key.offset;
10409         block_group->space_info->disk_total -= block_group->key.offset * factor;
10410
10411         spin_unlock(&block_group->space_info->lock);
10412
10413         memcpy(&key, &block_group->key, sizeof(key));
10414
10415         lock_chunks(root);
10416         if (!list_empty(&em->list)) {
10417                 /* We're in the transaction->pending_chunks list. */
10418                 free_extent_map(em);
10419         }
10420         spin_lock(&block_group->lock);
10421         block_group->removed = 1;
10422         /*
10423          * At this point trimming can't start on this block group, because we
10424          * removed the block group from the tree fs_info->block_group_cache_tree
10425          * so no one can't find it anymore and even if someone already got this
10426          * block group before we removed it from the rbtree, they have already
10427          * incremented block_group->trimming - if they didn't, they won't find
10428          * any free space entries because we already removed them all when we
10429          * called btrfs_remove_free_space_cache().
10430          *
10431          * And we must not remove the extent map from the fs_info->mapping_tree
10432          * to prevent the same logical address range and physical device space
10433          * ranges from being reused for a new block group. This is because our
10434          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10435          * completely transactionless, so while it is trimming a range the
10436          * currently running transaction might finish and a new one start,
10437          * allowing for new block groups to be created that can reuse the same
10438          * physical device locations unless we take this special care.
10439          *
10440          * There may also be an implicit trim operation if the file system
10441          * is mounted with -odiscard. The same protections must remain
10442          * in place until the extents have been discarded completely when
10443          * the transaction commit has completed.
10444          */
10445         remove_em = (atomic_read(&block_group->trimming) == 0);
10446         /*
10447          * Make sure a trimmer task always sees the em in the pinned_chunks list
10448          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10449          * before checking block_group->removed).
10450          */
10451         if (!remove_em) {
10452                 /*
10453                  * Our em might be in trans->transaction->pending_chunks which
10454                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10455                  * and so is the fs_info->pinned_chunks list.
10456                  *
10457                  * So at this point we must be holding the chunk_mutex to avoid
10458                  * any races with chunk allocation (more specifically at
10459                  * volumes.c:contains_pending_extent()), to ensure it always
10460                  * sees the em, either in the pending_chunks list or in the
10461                  * pinned_chunks list.
10462                  */
10463                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10464         }
10465         spin_unlock(&block_group->lock);
10466
10467         if (remove_em) {
10468                 struct extent_map_tree *em_tree;
10469
10470                 em_tree = &root->fs_info->mapping_tree.map_tree;
10471                 write_lock(&em_tree->lock);
10472                 /*
10473                  * The em might be in the pending_chunks list, so make sure the
10474                  * chunk mutex is locked, since remove_extent_mapping() will
10475                  * delete us from that list.
10476                  */
10477                 remove_extent_mapping(em_tree, em);
10478                 write_unlock(&em_tree->lock);
10479                 /* once for the tree */
10480                 free_extent_map(em);
10481         }
10482
10483         unlock_chunks(root);
10484
10485         ret = remove_block_group_free_space(trans, root->fs_info, block_group);
10486         if (ret)
10487                 goto out;
10488
10489         btrfs_put_block_group(block_group);
10490         btrfs_put_block_group(block_group);
10491
10492         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10493         if (ret > 0)
10494                 ret = -EIO;
10495         if (ret < 0)
10496                 goto out;
10497
10498         ret = btrfs_del_item(trans, root, path);
10499 out:
10500         btrfs_free_path(path);
10501         return ret;
10502 }
10503
10504 struct btrfs_trans_handle *
10505 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10506                                      const u64 chunk_offset)
10507 {
10508         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10509         struct extent_map *em;
10510         struct map_lookup *map;
10511         unsigned int num_items;
10512
10513         read_lock(&em_tree->lock);
10514         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10515         read_unlock(&em_tree->lock);
10516         ASSERT(em && em->start == chunk_offset);
10517
10518         /*
10519          * We need to reserve 3 + N units from the metadata space info in order
10520          * to remove a block group (done at btrfs_remove_chunk() and at
10521          * btrfs_remove_block_group()), which are used for:
10522          *
10523          * 1 unit for adding the free space inode's orphan (located in the tree
10524          * of tree roots).
10525          * 1 unit for deleting the block group item (located in the extent
10526          * tree).
10527          * 1 unit for deleting the free space item (located in tree of tree
10528          * roots).
10529          * N units for deleting N device extent items corresponding to each
10530          * stripe (located in the device tree).
10531          *
10532          * In order to remove a block group we also need to reserve units in the
10533          * system space info in order to update the chunk tree (update one or
10534          * more device items and remove one chunk item), but this is done at
10535          * btrfs_remove_chunk() through a call to check_system_chunk().
10536          */
10537         map = em->map_lookup;
10538         num_items = 3 + map->num_stripes;
10539         free_extent_map(em);
10540
10541         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10542                                                            num_items, 1);
10543 }
10544
10545 /*
10546  * Process the unused_bgs list and remove any that don't have any allocated
10547  * space inside of them.
10548  */
10549 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10550 {
10551         struct btrfs_block_group_cache *block_group;
10552         struct btrfs_space_info *space_info;
10553         struct btrfs_root *root = fs_info->extent_root;
10554         struct btrfs_trans_handle *trans;
10555         int ret = 0;
10556
10557         if (!fs_info->open)
10558                 return;
10559
10560         spin_lock(&fs_info->unused_bgs_lock);
10561         while (!list_empty(&fs_info->unused_bgs)) {
10562                 u64 start, end;
10563                 int trimming;
10564
10565                 block_group = list_first_entry(&fs_info->unused_bgs,
10566                                                struct btrfs_block_group_cache,
10567                                                bg_list);
10568                 list_del_init(&block_group->bg_list);
10569
10570                 space_info = block_group->space_info;
10571
10572                 if (ret || btrfs_mixed_space_info(space_info)) {
10573                         btrfs_put_block_group(block_group);
10574                         continue;
10575                 }
10576                 spin_unlock(&fs_info->unused_bgs_lock);
10577
10578                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10579
10580                 /* Don't want to race with allocators so take the groups_sem */
10581                 down_write(&space_info->groups_sem);
10582                 spin_lock(&block_group->lock);
10583                 if (block_group->reserved ||
10584                     btrfs_block_group_used(&block_group->item) ||
10585                     block_group->ro ||
10586                     list_is_singular(&block_group->list)) {
10587                         /*
10588                          * We want to bail if we made new allocations or have
10589                          * outstanding allocations in this block group.  We do
10590                          * the ro check in case balance is currently acting on
10591                          * this block group.
10592                          */
10593                         spin_unlock(&block_group->lock);
10594                         up_write(&space_info->groups_sem);
10595                         goto next;
10596                 }
10597                 spin_unlock(&block_group->lock);
10598
10599                 /* We don't want to force the issue, only flip if it's ok. */
10600                 ret = inc_block_group_ro(block_group, 0);
10601                 up_write(&space_info->groups_sem);
10602                 if (ret < 0) {
10603                         ret = 0;
10604                         goto next;
10605                 }
10606
10607                 /*
10608                  * Want to do this before we do anything else so we can recover
10609                  * properly if we fail to join the transaction.
10610                  */
10611                 trans = btrfs_start_trans_remove_block_group(fs_info,
10612                                                      block_group->key.objectid);
10613                 if (IS_ERR(trans)) {
10614                         btrfs_dec_block_group_ro(root, block_group);
10615                         ret = PTR_ERR(trans);
10616                         goto next;
10617                 }
10618
10619                 /*
10620                  * We could have pending pinned extents for this block group,
10621                  * just delete them, we don't care about them anymore.
10622                  */
10623                 start = block_group->key.objectid;
10624                 end = start + block_group->key.offset - 1;
10625                 /*
10626                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10627                  * btrfs_finish_extent_commit(). If we are at transaction N,
10628                  * another task might be running finish_extent_commit() for the
10629                  * previous transaction N - 1, and have seen a range belonging
10630                  * to the block group in freed_extents[] before we were able to
10631                  * clear the whole block group range from freed_extents[]. This
10632                  * means that task can lookup for the block group after we
10633                  * unpinned it from freed_extents[] and removed it, leading to
10634                  * a BUG_ON() at btrfs_unpin_extent_range().
10635                  */
10636                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10637                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10638                                   EXTENT_DIRTY);
10639                 if (ret) {
10640                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10641                         btrfs_dec_block_group_ro(root, block_group);
10642                         goto end_trans;
10643                 }
10644                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10645                                   EXTENT_DIRTY);
10646                 if (ret) {
10647                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10648                         btrfs_dec_block_group_ro(root, block_group);
10649                         goto end_trans;
10650                 }
10651                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10652
10653                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10654                 spin_lock(&space_info->lock);
10655                 spin_lock(&block_group->lock);
10656
10657                 space_info->bytes_pinned -= block_group->pinned;
10658                 space_info->bytes_readonly += block_group->pinned;
10659                 percpu_counter_add(&space_info->total_bytes_pinned,
10660                                    -block_group->pinned);
10661                 block_group->pinned = 0;
10662
10663                 spin_unlock(&block_group->lock);
10664                 spin_unlock(&space_info->lock);
10665
10666                 /* DISCARD can flip during remount */
10667                 trimming = btrfs_test_opt(root, DISCARD);
10668
10669                 /* Implicit trim during transaction commit. */
10670                 if (trimming)
10671                         btrfs_get_block_group_trimming(block_group);
10672
10673                 /*
10674                  * Btrfs_remove_chunk will abort the transaction if things go
10675                  * horribly wrong.
10676                  */
10677                 ret = btrfs_remove_chunk(trans, root,
10678                                          block_group->key.objectid);
10679
10680                 if (ret) {
10681                         if (trimming)
10682                                 btrfs_put_block_group_trimming(block_group);
10683                         goto end_trans;
10684                 }
10685
10686                 /*
10687                  * If we're not mounted with -odiscard, we can just forget
10688                  * about this block group. Otherwise we'll need to wait
10689                  * until transaction commit to do the actual discard.
10690                  */
10691                 if (trimming) {
10692                         spin_lock(&fs_info->unused_bgs_lock);
10693                         /*
10694                          * A concurrent scrub might have added us to the list
10695                          * fs_info->unused_bgs, so use a list_move operation
10696                          * to add the block group to the deleted_bgs list.
10697                          */
10698                         list_move(&block_group->bg_list,
10699                                   &trans->transaction->deleted_bgs);
10700                         spin_unlock(&fs_info->unused_bgs_lock);
10701                         btrfs_get_block_group(block_group);
10702                 }
10703 end_trans:
10704                 btrfs_end_transaction(trans, root);
10705 next:
10706                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10707                 btrfs_put_block_group(block_group);
10708                 spin_lock(&fs_info->unused_bgs_lock);
10709         }
10710         spin_unlock(&fs_info->unused_bgs_lock);
10711 }
10712
10713 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10714 {
10715         struct btrfs_space_info *space_info;
10716         struct btrfs_super_block *disk_super;
10717         u64 features;
10718         u64 flags;
10719         int mixed = 0;
10720         int ret;
10721
10722         disk_super = fs_info->super_copy;
10723         if (!btrfs_super_root(disk_super))
10724                 return -EINVAL;
10725
10726         features = btrfs_super_incompat_flags(disk_super);
10727         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10728                 mixed = 1;
10729
10730         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10731         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10732         if (ret)
10733                 goto out;
10734
10735         if (mixed) {
10736                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10737                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10738         } else {
10739                 flags = BTRFS_BLOCK_GROUP_METADATA;
10740                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10741                 if (ret)
10742                         goto out;
10743
10744                 flags = BTRFS_BLOCK_GROUP_DATA;
10745                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10746         }
10747 out:
10748         return ret;
10749 }
10750
10751 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10752 {
10753         return unpin_extent_range(root, start, end, false);
10754 }
10755
10756 /*
10757  * It used to be that old block groups would be left around forever.
10758  * Iterating over them would be enough to trim unused space.  Since we
10759  * now automatically remove them, we also need to iterate over unallocated
10760  * space.
10761  *
10762  * We don't want a transaction for this since the discard may take a
10763  * substantial amount of time.  We don't require that a transaction be
10764  * running, but we do need to take a running transaction into account
10765  * to ensure that we're not discarding chunks that were released in
10766  * the current transaction.
10767  *
10768  * Holding the chunks lock will prevent other threads from allocating
10769  * or releasing chunks, but it won't prevent a running transaction
10770  * from committing and releasing the memory that the pending chunks
10771  * list head uses.  For that, we need to take a reference to the
10772  * transaction.
10773  */
10774 static int btrfs_trim_free_extents(struct btrfs_device *device,
10775                                    u64 minlen, u64 *trimmed)
10776 {
10777         u64 start = 0, len = 0;
10778         int ret;
10779
10780         *trimmed = 0;
10781
10782         /* Not writeable = nothing to do. */
10783         if (!device->writeable)
10784                 return 0;
10785
10786         /* No free space = nothing to do. */
10787         if (device->total_bytes <= device->bytes_used)
10788                 return 0;
10789
10790         ret = 0;
10791
10792         while (1) {
10793                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10794                 struct btrfs_transaction *trans;
10795                 u64 bytes;
10796
10797                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10798                 if (ret)
10799                         return ret;
10800
10801                 down_read(&fs_info->commit_root_sem);
10802
10803                 spin_lock(&fs_info->trans_lock);
10804                 trans = fs_info->running_transaction;
10805                 if (trans)
10806                         atomic_inc(&trans->use_count);
10807                 spin_unlock(&fs_info->trans_lock);
10808
10809                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10810                                                  &start, &len);
10811                 if (trans)
10812                         btrfs_put_transaction(trans);
10813
10814                 if (ret) {
10815                         up_read(&fs_info->commit_root_sem);
10816                         mutex_unlock(&fs_info->chunk_mutex);
10817                         if (ret == -ENOSPC)
10818                                 ret = 0;
10819                         break;
10820                 }
10821
10822                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10823                 up_read(&fs_info->commit_root_sem);
10824                 mutex_unlock(&fs_info->chunk_mutex);
10825
10826                 if (ret)
10827                         break;
10828
10829                 start += len;
10830                 *trimmed += bytes;
10831
10832                 if (fatal_signal_pending(current)) {
10833                         ret = -ERESTARTSYS;
10834                         break;
10835                 }
10836
10837                 cond_resched();
10838         }
10839
10840         return ret;
10841 }
10842
10843 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10844 {
10845         struct btrfs_fs_info *fs_info = root->fs_info;
10846         struct btrfs_block_group_cache *cache = NULL;
10847         struct btrfs_device *device;
10848         struct list_head *devices;
10849         u64 group_trimmed;
10850         u64 start;
10851         u64 end;
10852         u64 trimmed = 0;
10853         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10854         int ret = 0;
10855
10856         /*
10857          * try to trim all FS space, our block group may start from non-zero.
10858          */
10859         if (range->len == total_bytes)
10860                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10861         else
10862                 cache = btrfs_lookup_block_group(fs_info, range->start);
10863
10864         while (cache) {
10865                 if (cache->key.objectid >= (range->start + range->len)) {
10866                         btrfs_put_block_group(cache);
10867                         break;
10868                 }
10869
10870                 start = max(range->start, cache->key.objectid);
10871                 end = min(range->start + range->len,
10872                                 cache->key.objectid + cache->key.offset);
10873
10874                 if (end - start >= range->minlen) {
10875                         if (!block_group_cache_done(cache)) {
10876                                 ret = cache_block_group(cache, 0);
10877                                 if (ret) {
10878                                         btrfs_put_block_group(cache);
10879                                         break;
10880                                 }
10881                                 ret = wait_block_group_cache_done(cache);
10882                                 if (ret) {
10883                                         btrfs_put_block_group(cache);
10884                                         break;
10885                                 }
10886                         }
10887                         ret = btrfs_trim_block_group(cache,
10888                                                      &group_trimmed,
10889                                                      start,
10890                                                      end,
10891                                                      range->minlen);
10892
10893                         trimmed += group_trimmed;
10894                         if (ret) {
10895                                 btrfs_put_block_group(cache);
10896                                 break;
10897                         }
10898                 }
10899
10900                 cache = next_block_group(fs_info->tree_root, cache);
10901         }
10902
10903         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10904         devices = &root->fs_info->fs_devices->alloc_list;
10905         list_for_each_entry(device, devices, dev_alloc_list) {
10906                 ret = btrfs_trim_free_extents(device, range->minlen,
10907                                               &group_trimmed);
10908                 if (ret)
10909                         break;
10910
10911                 trimmed += group_trimmed;
10912         }
10913         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10914
10915         range->len = trimmed;
10916         return ret;
10917 }
10918
10919 /*
10920  * btrfs_{start,end}_write_no_snapshoting() are similar to
10921  * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
10922  * data into the page cache through nocow before the subvolume is snapshoted,
10923  * but flush the data into disk after the snapshot creation, or to prevent
10924  * operations while snapshoting is ongoing and that cause the snapshot to be
10925  * inconsistent (writes followed by expanding truncates for example).
10926  */
10927 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10928 {
10929         percpu_counter_dec(&root->subv_writers->counter);
10930         /*
10931          * Make sure counter is updated before we wake up waiters.
10932          */
10933         smp_mb();
10934         if (waitqueue_active(&root->subv_writers->wait))
10935                 wake_up(&root->subv_writers->wait);
10936 }
10937
10938 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10939 {
10940         if (atomic_read(&root->will_be_snapshoted))
10941                 return 0;
10942
10943         percpu_counter_inc(&root->subv_writers->counter);
10944         /*
10945          * Make sure counter is updated before we check for snapshot creation.
10946          */
10947         smp_mb();
10948         if (atomic_read(&root->will_be_snapshoted)) {
10949                 btrfs_end_write_no_snapshoting(root);
10950                 return 0;
10951         }
10952         return 1;
10953 }
10954
10955 static int wait_snapshoting_atomic_t(atomic_t *a)
10956 {
10957         schedule();
10958         return 0;
10959 }
10960
10961 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
10962 {
10963         while (true) {
10964                 int ret;
10965
10966                 ret = btrfs_start_write_no_snapshoting(root);
10967                 if (ret)
10968                         break;
10969                 wait_on_atomic_t(&root->will_be_snapshoted,
10970                                  wait_snapshoting_atomic_t,
10971                                  TASK_UNINTERRUPTIBLE);
10972         }
10973 }