fs/btrfs/extent-tree.c (cascardo/linux.git, blob 75f4bed6e6dbc0a1d974d7cbbbfb9604ce64bc5e)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means only try to allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code
 * to help make sure we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
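
/*
 * Illustrative sketch (not taken from a real call site in this file): a
 * caller that merely wants a chunk when space is genuinely short would
 * pass CHUNK_ALLOC_NO_FORCE, while the clustering setup described above
 * would pass CHUNK_ALLOC_LIMITED, e.g.:
 *
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_NO_FORCE);
 */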

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
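
/*
 * Sketch of the intended pairing (hypothetical call site): an allocation
 * that already did its ENOSPC accounting reserves with
 * RESERVE_ALLOC_NO_ACCOUNT, and every successful reserve is later undone
 * with RESERVE_FREE, e.g.:
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, delalloc);
 */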

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op,
                                int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

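/*
 * NOTE (added commentary, not original): the smp_mb() below is assumed to
 * pair with the ordering implied by the spinlocked stores that move
 * cache->cached to BTRFS_CACHE_FINISHED/BTRFS_CACHE_ERROR, so that a
 * caller seeing "done" also sees the free space the caching thread
 * published.
 */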
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * cannot be used yet, because their free space will only be released when
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
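
/*
 * Worked example for add_new_free_space() (hypothetical numbers): caching a
 * range [0, 100) with pinned extents [10, 20) and [40, 50) adds the gaps
 * [0, 10), [20, 40) and [50, 100) as free space and returns 80.
 */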

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load
         * here, so we can wait for it to finish, otherwise we could end up
         * allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
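
/*
 * Summary of the caching state machine driven above (added commentary,
 * derived from this function and caching_thread()):
 *
 *   CACHE_NO -> CACHE_FAST -> CACHE_FINISHED  (fast load of the on-disk
 *                                              free space cache succeeds)
 *   CACHE_NO -> CACHE_FAST -> CACHE_STARTED -> CACHE_FINISHED / CACHE_ERROR
 *
 * The slow caching_thread is only queued when the fast load fails (or the
 * space cache is disabled) and load_cache_only is not set.
 */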

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for implicit back refs is the
 * objectid of the block's owner tree.  The key offset for full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

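/*
 * Concrete key layouts implied by the rules above (added summary; the
 * (objectid, type, offset) triples mirror the lookup helpers below):
 *
 *   implicit data ref: (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *                       hash(root objectid, inode objectid, file offset))
 *   shared data ref:   (extent bytenr, BTRFS_SHARED_DATA_REF_KEY,
 *                       parent block bytenr)
 *   implicit tree ref: (block bytenr, BTRFS_TREE_BLOCK_REF_KEY,
 *                       owner root objectid)
 *   full tree ref:     (block bytenr, BTRFS_SHARED_BLOCK_REF_KEY,
 *                       parent block bytenr)
 */
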
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
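
/*
 * Note on the 31-bit (not 32-bit) shift above (added commentary): the
 * hash is baked into on-disk key offsets, so however tempting, it cannot
 * be "corrected" without breaking existing filesystems.
 */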

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
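                /*
                 * hash_extent_data_ref() can collide for different
                 * (root, owner, offset) triples: on -EEXIST, check whether
                 * the existing item really matches, and otherwise probe the
                 * next key offset until a free slot or the match is found.
                 */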
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
1261
1262 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1263                                            struct btrfs_root *root,
1264                                            struct btrfs_path *path,
1265                                            int refs_to_drop, int *last_ref)
1266 {
1267         struct btrfs_key key;
1268         struct btrfs_extent_data_ref *ref1 = NULL;
1269         struct btrfs_shared_data_ref *ref2 = NULL;
1270         struct extent_buffer *leaf;
1271         u32 num_refs = 0;
1272         int ret = 0;
1273
1274         leaf = path->nodes[0];
1275         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1276
1277         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1278                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1279                                       struct btrfs_extent_data_ref);
1280                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1281         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1282                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1283                                       struct btrfs_shared_data_ref);
1284                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1285 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1286         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1287                 struct btrfs_extent_ref_v0 *ref0;
1288                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1289                                       struct btrfs_extent_ref_v0);
1290                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1291 #endif
1292         } else {
1293                 BUG();
1294         }
1295
1296         BUG_ON(num_refs < refs_to_drop);
1297         num_refs -= refs_to_drop;
1298
1299         if (num_refs == 0) {
1300                 ret = btrfs_del_item(trans, root, path);
1301                 *last_ref = 1;
1302         } else {
1303                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1304                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1305                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1306                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1307 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1308                 else {
1309                         struct btrfs_extent_ref_v0 *ref0;
1310                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1311                                         struct btrfs_extent_ref_v0);
1312                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1313                 }
1314 #endif
1315                 btrfs_mark_buffer_dirty(leaf);
1316         }
1317         return ret;
1318 }
1319
1320 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1321                                           struct btrfs_path *path,
1322                                           struct btrfs_extent_inline_ref *iref)
1323 {
1324         struct btrfs_key key;
1325         struct extent_buffer *leaf;
1326         struct btrfs_extent_data_ref *ref1;
1327         struct btrfs_shared_data_ref *ref2;
1328         u32 num_refs = 0;
1329
1330         leaf = path->nodes[0];
1331         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1332         if (iref) {
1333                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1334                     BTRFS_EXTENT_DATA_REF_KEY) {
1335                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1336                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1337                 } else {
1338                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1339                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1340                 }
1341         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1342                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1343                                       struct btrfs_extent_data_ref);
1344                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1346                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1347                                       struct btrfs_shared_data_ref);
1348                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1349 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1350         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1351                 struct btrfs_extent_ref_v0 *ref0;
1352                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1353                                       struct btrfs_extent_ref_v0);
1354                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1355 #endif
1356         } else {
1357                 WARN_ON(1);
1358         }
1359         return num_refs;
1360 }
1361
1362 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1363                                           struct btrfs_root *root,
1364                                           struct btrfs_path *path,
1365                                           u64 bytenr, u64 parent,
1366                                           u64 root_objectid)
1367 {
1368         struct btrfs_key key;
1369         int ret;
1370
1371         key.objectid = bytenr;
1372         if (parent) {
1373                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1374                 key.offset = parent;
1375         } else {
1376                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1377                 key.offset = root_objectid;
1378         }
1379
1380         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1381         if (ret > 0)
1382                 ret = -ENOENT;
1383 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1384         if (ret == -ENOENT && parent) {
1385                 btrfs_release_path(path);
1386                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1387                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388                 if (ret > 0)
1389                         ret = -ENOENT;
1390         }
1391 #endif
1392         return ret;
1393 }
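
/*
 * For illustration, the two key shapes built above: a shared ref is keyed
 * by the bytenr of the referencing tree block, a non-shared ref by the
 * owning root:
 *
 *	(bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent)
 *	(bytenr, BTRFS_TREE_BLOCK_REF_KEY,   root_objectid)
 */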
1394
1395 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1396                                           struct btrfs_root *root,
1397                                           struct btrfs_path *path,
1398                                           u64 bytenr, u64 parent,
1399                                           u64 root_objectid)
1400 {
1401         struct btrfs_key key;
1402         int ret;
1403
1404         key.objectid = bytenr;
1405         if (parent) {
1406                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1407                 key.offset = parent;
1408         } else {
1409                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1410                 key.offset = root_objectid;
1411         }
1412
1413         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1414         btrfs_release_path(path);
1415         return ret;
1416 }
1417
1418 static inline int extent_ref_type(u64 parent, u64 owner)
1419 {
1420         int type;
1421         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1422                 if (parent > 0)
1423                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1424                 else
1425                         type = BTRFS_TREE_BLOCK_REF_KEY;
1426         } else {
1427                 if (parent > 0)
1428                         type = BTRFS_SHARED_DATA_REF_KEY;
1429                 else
1430                         type = BTRFS_EXTENT_DATA_REF_KEY;
1431         }
1432         return type;
1433 }
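
/*
 * A minimal sketch of the mapping above: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree-block levels, i.e. metadata, while
 * anything else is an inode objectid, i.e. data:
 *
 *	extent_ref_type(0,      level) == BTRFS_TREE_BLOCK_REF_KEY
 *	extent_ref_type(parent, level) == BTRFS_SHARED_BLOCK_REF_KEY
 *	extent_ref_type(0,      ino)   == BTRFS_EXTENT_DATA_REF_KEY
 *	extent_ref_type(parent, ino)   == BTRFS_SHARED_DATA_REF_KEY
 */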
1434
1435 static int find_next_key(struct btrfs_path *path, int level,
1436                          struct btrfs_key *key)
1438 {
1439         for (; level < BTRFS_MAX_LEVEL; level++) {
1440                 if (!path->nodes[level])
1441                         break;
1442                 if (path->slots[level] + 1 >=
1443                     btrfs_header_nritems(path->nodes[level]))
1444                         continue;
1445                 if (level == 0)
1446                         btrfs_item_key_to_cpu(path->nodes[level], key,
1447                                               path->slots[level] + 1);
1448                 else
1449                         btrfs_node_key_to_cpu(path->nodes[level], key,
1450                                               path->slots[level] + 1);
1451                 return 0;
1452         }
1453         return 1;
1454 }
1455
1456 /*
1457  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1458  * to the address of the inline back ref, and 0 is returned.
1459  *
1460  * If the back ref isn't found, *ref_ret is set to the address where it
1461  * should be inserted, and -ENOENT is returned.
1462  *
1463  * If insert is true and there are too many inline back refs, the path
1464  * points to the extent item, and -EAGAIN is returned.
1465  *
1466  * NOTE: inline back refs are ordered in the same way that back ref
1467  *       items in the tree are ordered.
1468  */
1469 static noinline_for_stack
1470 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1471                                  struct btrfs_root *root,
1472                                  struct btrfs_path *path,
1473                                  struct btrfs_extent_inline_ref **ref_ret,
1474                                  u64 bytenr, u64 num_bytes,
1475                                  u64 parent, u64 root_objectid,
1476                                  u64 owner, u64 offset, int insert)
1477 {
1478         struct btrfs_key key;
1479         struct extent_buffer *leaf;
1480         struct btrfs_extent_item *ei;
1481         struct btrfs_extent_inline_ref *iref;
1482         u64 flags;
1483         u64 item_size;
1484         unsigned long ptr;
1485         unsigned long end;
1486         int extra_size;
1487         int type;
1488         int want;
1489         int ret;
1490         int err = 0;
1491         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1492                                                  SKINNY_METADATA);
1493
1494         key.objectid = bytenr;
1495         key.type = BTRFS_EXTENT_ITEM_KEY;
1496         key.offset = num_bytes;
1497
1498         want = extent_ref_type(parent, owner);
1499         if (insert) {
1500                 extra_size = btrfs_extent_inline_ref_size(want);
1501                 path->keep_locks = 1;
1502         } else
1503                 extra_size = -1;
1504
1505         /*
1506          * Owner is our parent level, so we can just add one to get the level
1507          * for the block we are interested in.
1508          */
1509         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1510                 key.type = BTRFS_METADATA_ITEM_KEY;
1511                 key.offset = owner;
1512         }
1513
1514 again:
1515         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1516         if (ret < 0) {
1517                 err = ret;
1518                 goto out;
1519         }
1520
1521         /*
1522          * We may be a newly converted file system which still has the old fat
1523          * extent entries for metadata, so try and see if we have one of those.
1524          */
1525         if (ret > 0 && skinny_metadata) {
1526                 skinny_metadata = false;
1527                 if (path->slots[0]) {
1528                         path->slots[0]--;
1529                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1530                                               path->slots[0]);
1531                         if (key.objectid == bytenr &&
1532                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1533                             key.offset == num_bytes)
1534                                 ret = 0;
1535                 }
1536                 if (ret) {
1537                         key.objectid = bytenr;
1538                         key.type = BTRFS_EXTENT_ITEM_KEY;
1539                         key.offset = num_bytes;
1540                         btrfs_release_path(path);
1541                         goto again;
1542                 }
1543         }
1544
1545         if (ret && !insert) {
1546                 err = -ENOENT;
1547                 goto out;
1548         } else if (WARN_ON(ret)) {
1549                 err = -EIO;
1550                 goto out;
1551         }
1552
1553         leaf = path->nodes[0];
1554         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1555 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1556         if (item_size < sizeof(*ei)) {
1557                 if (!insert) {
1558                         err = -ENOENT;
1559                         goto out;
1560                 }
1561                 ret = convert_extent_item_v0(trans, root, path, owner,
1562                                              extra_size);
1563                 if (ret < 0) {
1564                         err = ret;
1565                         goto out;
1566                 }
1567                 leaf = path->nodes[0];
1568                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1569         }
1570 #endif
1571         BUG_ON(item_size < sizeof(*ei));
1572
1573         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1574         flags = btrfs_extent_flags(leaf, ei);
1575
1576         ptr = (unsigned long)(ei + 1);
1577         end = (unsigned long)ei + item_size;
1578
1579         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1580                 ptr += sizeof(struct btrfs_tree_block_info);
1581                 BUG_ON(ptr > end);
1582         }
1583
1584         err = -ENOENT;
1585         while (1) {
1586                 if (ptr >= end) {
1587                         WARN_ON(ptr > end);
1588                         break;
1589                 }
1590                 iref = (struct btrfs_extent_inline_ref *)ptr;
1591                 type = btrfs_extent_inline_ref_type(leaf, iref);
1592                 if (want < type)
1593                         break;
1594                 if (want > type) {
1595                         ptr += btrfs_extent_inline_ref_size(type);
1596                         continue;
1597                 }
1598
1599                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1600                         struct btrfs_extent_data_ref *dref;
1601                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1602                         if (match_extent_data_ref(leaf, dref, root_objectid,
1603                                                   owner, offset)) {
1604                                 err = 0;
1605                                 break;
1606                         }
1607                         if (hash_extent_data_ref_item(leaf, dref) <
1608                             hash_extent_data_ref(root_objectid, owner, offset))
1609                                 break;
1610                 } else {
1611                         u64 ref_offset;
1612                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1613                         if (parent > 0) {
1614                                 if (parent == ref_offset) {
1615                                         err = 0;
1616                                         break;
1617                                 }
1618                                 if (ref_offset < parent)
1619                                         break;
1620                         } else {
1621                                 if (root_objectid == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < root_objectid)
1626                                         break;
1627                         }
1628                 }
1629                 ptr += btrfs_extent_inline_ref_size(type);
1630         }
1631         if (err == -ENOENT && insert) {
1632                 if (item_size + extra_size >=
1633                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1634                         err = -EAGAIN;
1635                         goto out;
1636                 }
1637                 /*
1638                  * To add new inline back ref, we have to make sure
1639                  * there is no corresponding back ref item.
1640                  * For simplicity, we just do not add new inline back
1641                  * ref if there is any kind of item for this block
1642                  */
1643                 if (find_next_key(path, 0, &key) == 0 &&
1644                     key.objectid == bytenr &&
1645                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1646                         err = -EAGAIN;
1647                         goto out;
1648                 }
1649         }
1650         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1651 out:
1652         if (insert) {
1653                 path->keep_locks = 0;
1654                 btrfs_unlock_up_safe(path, 1);
1655         }
1656         return err;
1657 }
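
/*
 * For reference, a sketch of the extent item layout the loop above walks
 * (the tree_block_info part is only present for tree blocks on
 * non-skinny-metadata file systems):
 *
 *	[ btrfs_extent_item ][ btrfs_tree_block_info? ][ inline ref ] ...
 *
 * Inline refs are sorted by type, and within a type by hash or offset,
 * matching the ordering of back ref items in the tree.
 */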
1658
1659 /*
1660  * helper to add new inline back ref
1661  */
1662 static noinline_for_stack
1663 void setup_inline_extent_backref(struct btrfs_root *root,
1664                                  struct btrfs_path *path,
1665                                  struct btrfs_extent_inline_ref *iref,
1666                                  u64 parent, u64 root_objectid,
1667                                  u64 owner, u64 offset, int refs_to_add,
1668                                  struct btrfs_delayed_extent_op *extent_op)
1669 {
1670         struct extent_buffer *leaf;
1671         struct btrfs_extent_item *ei;
1672         unsigned long ptr;
1673         unsigned long end;
1674         unsigned long item_offset;
1675         u64 refs;
1676         int size;
1677         int type;
1678
1679         leaf = path->nodes[0];
1680         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681         item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683         type = extent_ref_type(parent, owner);
1684         size = btrfs_extent_inline_ref_size(type);
1685
1686         btrfs_extend_item(root, path, size);
1687
1688         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689         refs = btrfs_extent_refs(leaf, ei);
1690         refs += refs_to_add;
1691         btrfs_set_extent_refs(leaf, ei, refs);
1692         if (extent_op)
1693                 __run_delayed_extent_op(extent_op, leaf, ei);
1694
1695         ptr = (unsigned long)ei + item_offset;
1696         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697         if (ptr < end - size)
1698                 memmove_extent_buffer(leaf, ptr + size, ptr,
1699                                       end - size - ptr);
1700
1701         iref = (struct btrfs_extent_inline_ref *)ptr;
1702         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704                 struct btrfs_extent_data_ref *dref;
1705                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 struct btrfs_shared_data_ref *sref;
1712                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717         } else {
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719         }
1720         btrfs_mark_buffer_dirty(leaf);
1721 }
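
/*
 * A before/after sketch of the insertion above, for a hypothetical item
 * that already holds two inline refs and gains one between them:
 *
 *	before:	[ ei ][ ref A ][ ref B ]
 *	after:	[ ei ][ ref A ][ new ref ][ ref B ]
 *
 * btrfs_extend_item() grows the item by "size" bytes; the memmove then
 * shifts everything from item_offset to the old end to open the gap.
 */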
1722
1723 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724                                  struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref **ref_ret,
1727                                  u64 bytenr, u64 num_bytes, u64 parent,
1728                                  u64 root_objectid, u64 owner, u64 offset)
1729 {
1730         int ret;
1731
1732         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733                                            bytenr, num_bytes, parent,
1734                                            root_objectid, owner, offset, 0);
1735         if (ret != -ENOENT)
1736                 return ret;
1737
1738         btrfs_release_path(path);
1739         *ref_ret = NULL;
1740
1741         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743                                             root_objectid);
1744         } else {
1745                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746                                              root_objectid, owner, offset);
1747         }
1748         return ret;
1749 }
1750
1751 /*
1752  * helper to update/remove inline back ref
1753  */
1754 static noinline_for_stack
1755 void update_inline_extent_backref(struct btrfs_root *root,
1756                                   struct btrfs_path *path,
1757                                   struct btrfs_extent_inline_ref *iref,
1758                                   int refs_to_mod,
1759                                   struct btrfs_delayed_extent_op *extent_op,
1760                                   int *last_ref)
1761 {
1762         struct extent_buffer *leaf;
1763         struct btrfs_extent_item *ei;
1764         struct btrfs_extent_data_ref *dref = NULL;
1765         struct btrfs_shared_data_ref *sref = NULL;
1766         unsigned long ptr;
1767         unsigned long end;
1768         u32 item_size;
1769         int size;
1770         int type;
1771         u64 refs;
1772
1773         leaf = path->nodes[0];
1774         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1775         refs = btrfs_extent_refs(leaf, ei);
1776         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1777         refs += refs_to_mod;
1778         btrfs_set_extent_refs(leaf, ei, refs);
1779         if (extent_op)
1780                 __run_delayed_extent_op(extent_op, leaf, ei);
1781
1782         type = btrfs_extent_inline_ref_type(leaf, iref);
1783
1784         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1785                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1786                 refs = btrfs_extent_data_ref_count(leaf, dref);
1787         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1789                 refs = btrfs_shared_data_ref_count(leaf, sref);
1790         } else {
1791                 refs = 1;
1792                 BUG_ON(refs_to_mod != -1);
1793         }
1794
1795         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1796         refs += refs_to_mod;
1797
1798         if (refs > 0) {
1799                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1800                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1801                 else
1802                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1803         } else {
1804                 *last_ref = 1;
1805                 size = btrfs_extent_inline_ref_size(type);
1806                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1807                 ptr = (unsigned long)iref;
1808                 end = (unsigned long)ei + item_size;
1809                 if (ptr + size < end)
1810                         memmove_extent_buffer(leaf, ptr, ptr + size,
1811                                               end - ptr - size);
1812                 item_size -= size;
1813                 btrfs_truncate_item(root, path, item_size, 1);
1814         }
1815         btrfs_mark_buffer_dirty(leaf);
1816 }
1817
1818 static noinline_for_stack
1819 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1820                                  struct btrfs_root *root,
1821                                  struct btrfs_path *path,
1822                                  u64 bytenr, u64 num_bytes, u64 parent,
1823                                  u64 root_objectid, u64 owner,
1824                                  u64 offset, int refs_to_add,
1825                                  struct btrfs_delayed_extent_op *extent_op)
1826 {
1827         struct btrfs_extent_inline_ref *iref;
1828         int ret;
1829
1830         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1831                                            bytenr, num_bytes, parent,
1832                                            root_objectid, owner, offset, 1);
1833         if (ret == 0) {
1834                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1835                 update_inline_extent_backref(root, path, iref,
1836                                              refs_to_add, extent_op, NULL);
1837         } else if (ret == -ENOENT) {
1838                 setup_inline_extent_backref(root, path, iref, parent,
1839                                             root_objectid, owner, offset,
1840                                             refs_to_add, extent_op);
1841                 ret = 0;
1842         }
1843         return ret;
1844 }
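
/*
 * Note how the lookup protocol is consumed above: 0 means an existing
 * inline ref was found and its count is bumped in place, -ENOENT means
 * the path points at the insertion offset so a new inline ref is carved
 * out, and -EAGAIN (no inline room) is passed back so the caller falls
 * back to a separate backref item via insert_extent_backref().
 */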
1845
1846 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1847                                  struct btrfs_root *root,
1848                                  struct btrfs_path *path,
1849                                  u64 bytenr, u64 parent, u64 root_objectid,
1850                                  u64 owner, u64 offset, int refs_to_add)
1851 {
1852         int ret;
1853         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1854                 BUG_ON(refs_to_add != 1);
1855                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1856                                             parent, root_objectid);
1857         } else {
1858                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1859                                              parent, root_objectid,
1860                                              owner, offset, refs_to_add);
1861         }
1862         return ret;
1863 }
1864
1865 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1866                                  struct btrfs_root *root,
1867                                  struct btrfs_path *path,
1868                                  struct btrfs_extent_inline_ref *iref,
1869                                  int refs_to_drop, int is_data, int *last_ref)
1870 {
1871         int ret = 0;
1872
1873         BUG_ON(!is_data && refs_to_drop != 1);
1874         if (iref) {
1875                 update_inline_extent_backref(root, path, iref,
1876                                              -refs_to_drop, NULL, last_ref);
1877         } else if (is_data) {
1878                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1879                                              last_ref);
1880         } else {
1881                 *last_ref = 1;
1882                 ret = btrfs_del_item(trans, root, path);
1883         }
1884         return ret;
1885 }
1886
1887 static int btrfs_issue_discard(struct block_device *bdev,
1888                                 u64 start, u64 len)
1889 {
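        /* blkdev_issue_discard() takes 512-byte sectors, hence the >> 9 */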
1890         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891 }
1892
1893 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894                          u64 num_bytes, u64 *actual_bytes)
1895 {
1896         int ret;
1897         u64 discarded_bytes = 0;
1898         struct btrfs_bio *bbio = NULL;
1899
1901         /* Tell the block device(s) that the sectors can be discarded */
1902         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903                               bytenr, &num_bytes, &bbio, 0);
1904         /* Error condition is -ENOMEM */
1905         if (!ret) {
1906                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1907                 int i;
1908
1910                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911                         if (!stripe->dev->can_discard)
1912                                 continue;
1913
1914                         ret = btrfs_issue_discard(stripe->dev->bdev,
1915                                                   stripe->physical,
1916                                                   stripe->length);
1917                         if (!ret)
1918                                 discarded_bytes += stripe->length;
1919                         else if (ret != -EOPNOTSUPP)
1920                                 break; /* Logic errors or -ENOMEM, or -EIO, but it's unclear how -EIO could happen here */
1921
1922                         /*
1923                          * In case we get back EOPNOTSUPP for some reason,
1924                          * just ignore the return value so we don't screw up
1925                          * people calling discard_extent.
1926                          */
1927                         ret = 0;
1928                 }
1929                 btrfs_put_bbio(bbio);
1930         }
1931
1932         if (actual_bytes)
1933                 *actual_bytes = discarded_bytes;
1934
1936         if (ret == -EOPNOTSUPP)
1937                 ret = 0;
1938         return ret;
1939 }
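
/*
 * A hypothetical caller, checking how much was actually discarded
 * (stripes on devices without discard support are skipped above):
 *
 *	u64 actual = 0;
 *	int ret = btrfs_discard_extent(root, bytenr, num_bytes, &actual);
 *
 *	if (!ret && actual < num_bytes)
 *		... some of the range could not be discarded ...
 */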
1940
1941 /* Can return -ENOMEM */
1942 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943                          struct btrfs_root *root,
1944                          u64 bytenr, u64 num_bytes, u64 parent,
1945                          u64 root_objectid, u64 owner, u64 offset,
1946                          int no_quota)
1947 {
1948         int ret;
1949         struct btrfs_fs_info *fs_info = root->fs_info;
1950
1951         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1952                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1953
1954         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1955                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1956                                         num_bytes,
1957                                         parent, root_objectid, (int)owner,
1958                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1959         } else {
1960                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1961                                         num_bytes,
1962                                         parent, root_objectid, owner, offset,
1963                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1964         }
1965         return ret;
1966 }
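
/*
 * A sketch of the two call shapes above; values other than the function's
 * own arguments are illustrative:
 *
 *	a tree block at "level", referenced by tree "root_objectid":
 *		btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *				     root_objectid, level, 0, 0);
 *
 *	a data extent owned by inode "ino" at file offset "off":
 *		btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *				     root_objectid, ino, off, 0);
 */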
1967
1968 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1969                                   struct btrfs_root *root,
1970                                   u64 bytenr, u64 num_bytes,
1971                                   u64 parent, u64 root_objectid,
1972                                   u64 owner, u64 offset, int refs_to_add,
1973                                   int no_quota,
1974                                   struct btrfs_delayed_extent_op *extent_op)
1975 {
1976         struct btrfs_fs_info *fs_info = root->fs_info;
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         struct btrfs_key key;
1981         u64 refs;
1982         int ret;
1983         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1984
1985         path = btrfs_alloc_path();
1986         if (!path)
1987                 return -ENOMEM;
1988
1989         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1990                 no_quota = 1;
1991
1992         path->reada = 1;
1993         path->leave_spinning = 1;
1994         /* this will set up the path even if it fails to insert the back ref */
1995         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1996                                            bytenr, num_bytes, parent,
1997                                            root_objectid, owner, offset,
1998                                            refs_to_add, extent_op);
1999         if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2000                 goto out;
2001         /*
2002          * Ok, we were able to insert an inline extent ref and it appears to
2003          * be a new reference, so deal with the qgroup accounting.
2004          */
2005         if (!ret && !no_quota) {
2006                 ASSERT(root->fs_info->quota_enabled);
2007                 leaf = path->nodes[0];
2008                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2009                 item = btrfs_item_ptr(leaf, path->slots[0],
2010                                       struct btrfs_extent_item);
2011                 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2012                         type = BTRFS_QGROUP_OPER_ADD_SHARED;
2013                 btrfs_release_path(path);
2014
2015                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2016                                               bytenr, num_bytes, type, 0);
2017                 goto out;
2018         }
2019
2020         /*
2021          * Ok, we had -EAGAIN, which means we didn't have space to insert an
2022          * inline extent ref, so just update the reference count and add a
2023          * normal backref.
2024          */
2025         leaf = path->nodes[0];
2026         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2027         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2028         refs = btrfs_extent_refs(leaf, item);
2029         if (refs)
2030                 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2031         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2032         if (extent_op)
2033                 __run_delayed_extent_op(extent_op, leaf, item);
2034
2035         btrfs_mark_buffer_dirty(leaf);
2036         btrfs_release_path(path);
2037
2038         if (!no_quota) {
2039                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2040                                               bytenr, num_bytes, type, 0);
2041                 if (ret)
2042                         goto out;
2043         }
2044
2045         path->reada = 1;
2046         path->leave_spinning = 1;
2047         /* now insert the actual backref */
2048         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2049                                     path, bytenr, parent, root_objectid,
2050                                     owner, offset, refs_to_add);
2051         if (ret)
2052                 btrfs_abort_transaction(trans, root, ret);
2053 out:
2054         btrfs_free_path(path);
2055         return ret;
2056 }
2057
2058 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2059                                 struct btrfs_root *root,
2060                                 struct btrfs_delayed_ref_node *node,
2061                                 struct btrfs_delayed_extent_op *extent_op,
2062                                 int insert_reserved)
2063 {
2064         int ret = 0;
2065         struct btrfs_delayed_data_ref *ref;
2066         struct btrfs_key ins;
2067         u64 parent = 0;
2068         u64 ref_root = 0;
2069         u64 flags = 0;
2070
2071         ins.objectid = node->bytenr;
2072         ins.offset = node->num_bytes;
2073         ins.type = BTRFS_EXTENT_ITEM_KEY;
2074
2075         ref = btrfs_delayed_node_to_data_ref(node);
2076         trace_run_delayed_data_ref(node, ref, node->action);
2077
2078         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2079                 parent = ref->parent;
2080         ref_root = ref->root;
2081
2082         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2083                 if (extent_op)
2084                         flags |= extent_op->flags_to_set;
2085                 ret = alloc_reserved_file_extent(trans, root,
2086                                                  parent, ref_root, flags,
2087                                                  ref->objectid, ref->offset,
2088                                                  &ins, node->ref_mod);
2089         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2090                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2091                                              node->num_bytes, parent,
2092                                              ref_root, ref->objectid,
2093                                              ref->offset, node->ref_mod,
2094                                              node->no_quota, extent_op);
2095         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2096                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2097                                           node->num_bytes, parent,
2098                                           ref_root, ref->objectid,
2099                                           ref->offset, node->ref_mod,
2100                                           extent_op, node->no_quota);
2101         } else {
2102                 BUG();
2103         }
2104         return ret;
2105 }
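
/*
 * In short: ADD with insert_reserved set means the extent item does not
 * exist yet and is created from the reserved space; a plain ADD bumps the
 * refcount on an existing item; DROP decrements it, freeing the extent
 * when the last reference goes away.
 */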
2106
2107 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2108                                     struct extent_buffer *leaf,
2109                                     struct btrfs_extent_item *ei)
2110 {
2111         u64 flags = btrfs_extent_flags(leaf, ei);
2112         if (extent_op->update_flags) {
2113                 flags |= extent_op->flags_to_set;
2114                 btrfs_set_extent_flags(leaf, ei, flags);
2115         }
2116
2117         if (extent_op->update_key) {
2118                 struct btrfs_tree_block_info *bi;
2119                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2120                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2121                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2122         }
2123 }
2124
2125 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2126                                  struct btrfs_root *root,
2127                                  struct btrfs_delayed_ref_node *node,
2128                                  struct btrfs_delayed_extent_op *extent_op)
2129 {
2130         struct btrfs_key key;
2131         struct btrfs_path *path;
2132         struct btrfs_extent_item *ei;
2133         struct extent_buffer *leaf;
2134         u32 item_size;
2135         int ret;
2136         int err = 0;
2137         int metadata = !extent_op->is_data;
2138
2139         if (trans->aborted)
2140                 return 0;
2141
2142         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2143                 metadata = 0;
2144
2145         path = btrfs_alloc_path();
2146         if (!path)
2147                 return -ENOMEM;
2148
2149         key.objectid = node->bytenr;
2150
2151         if (metadata) {
2152                 key.type = BTRFS_METADATA_ITEM_KEY;
2153                 key.offset = extent_op->level;
2154         } else {
2155                 key.type = BTRFS_EXTENT_ITEM_KEY;
2156                 key.offset = node->num_bytes;
2157         }
2158
2159 again:
2160         path->reada = 1;
2161         path->leave_spinning = 1;
2162         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2163                                 path, 0, 1);
2164         if (ret < 0) {
2165                 err = ret;
2166                 goto out;
2167         }
2168         if (ret > 0) {
2169                 if (metadata) {
2170                         if (path->slots[0] > 0) {
2171                                 path->slots[0]--;
2172                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2173                                                       path->slots[0]);
2174                                 if (key.objectid == node->bytenr &&
2175                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2176                                     key.offset == node->num_bytes)
2177                                         ret = 0;
2178                         }
2179                         if (ret > 0) {
2180                                 btrfs_release_path(path);
2181                                 metadata = 0;
2182
2183                                 key.objectid = node->bytenr;
2184                                 key.offset = node->num_bytes;
2185                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2186                                 goto again;
2187                         }
2188                 } else {
2189                         err = -EIO;
2190                         goto out;
2191                 }
2192         }
2193
2194         leaf = path->nodes[0];
2195         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2196 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2197         if (item_size < sizeof(*ei)) {
2198                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2199                                              path, (u64)-1, 0);
2200                 if (ret < 0) {
2201                         err = ret;
2202                         goto out;
2203                 }
2204                 leaf = path->nodes[0];
2205                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2206         }
2207 #endif
2208         BUG_ON(item_size < sizeof(*ei));
2209         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2210         __run_delayed_extent_op(extent_op, leaf, ei);
2211
2212         btrfs_mark_buffer_dirty(leaf);
2213 out:
2214         btrfs_free_path(path);
2215         return err;
2216 }
2217
2218 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2219                                 struct btrfs_root *root,
2220                                 struct btrfs_delayed_ref_node *node,
2221                                 struct btrfs_delayed_extent_op *extent_op,
2222                                 int insert_reserved)
2223 {
2224         int ret = 0;
2225         struct btrfs_delayed_tree_ref *ref;
2226         struct btrfs_key ins;
2227         u64 parent = 0;
2228         u64 ref_root = 0;
2229         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2230                                                  SKINNY_METADATA);
2231
2232         ref = btrfs_delayed_node_to_tree_ref(node);
2233         trace_run_delayed_tree_ref(node, ref, node->action);
2234
2235         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2236                 parent = ref->parent;
2237         ref_root = ref->root;
2238
2239         ins.objectid = node->bytenr;
2240         if (skinny_metadata) {
2241                 ins.offset = ref->level;
2242                 ins.type = BTRFS_METADATA_ITEM_KEY;
2243         } else {
2244                 ins.offset = node->num_bytes;
2245                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2246         }
2247
2248         BUG_ON(node->ref_mod != 1);
2249         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2250                 BUG_ON(!extent_op || !extent_op->update_flags);
2251                 ret = alloc_reserved_tree_block(trans, root,
2252                                                 parent, ref_root,
2253                                                 extent_op->flags_to_set,
2254                                                 &extent_op->key,
2255                                                 ref->level, &ins,
2256                                                 node->no_quota);
2257         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2258                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2259                                              node->num_bytes, parent, ref_root,
2260                                              ref->level, 0, 1, node->no_quota,
2261                                              extent_op);
2262         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2263                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2264                                           node->num_bytes, parent, ref_root,
2265                                           ref->level, 0, 1, extent_op,
2266                                           node->no_quota);
2267         } else {
2268                 BUG();
2269         }
2270         return ret;
2271 }
2272
2273 /* helper function to actually process a single delayed ref entry */
2274 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2275                                struct btrfs_root *root,
2276                                struct btrfs_delayed_ref_node *node,
2277                                struct btrfs_delayed_extent_op *extent_op,
2278                                int insert_reserved)
2279 {
2280         int ret = 0;
2281
2282         if (trans->aborted) {
2283                 if (insert_reserved)
2284                         btrfs_pin_extent(root, node->bytenr,
2285                                          node->num_bytes, 1);
2286                 return 0;
2287         }
2288
2289         if (btrfs_delayed_ref_is_head(node)) {
2290                 struct btrfs_delayed_ref_head *head;
2291                 /*
2292                  * We've hit the end of the chain and we were supposed
2293                  * to insert this extent into the tree.  But it got
2294                  * deleted before we ever needed to insert it, so all
2295                  * we have to do is clean up the accounting.
2296                  */
2297                 BUG_ON(extent_op);
2298                 head = btrfs_delayed_node_to_head(node);
2299                 trace_run_delayed_ref_head(node, head, node->action);
2300
2301                 if (insert_reserved) {
2302                         btrfs_pin_extent(root, node->bytenr,
2303                                          node->num_bytes, 1);
2304                         if (head->is_data) {
2305                                 ret = btrfs_del_csums(trans, root,
2306                                                       node->bytenr,
2307                                                       node->num_bytes);
2308                         }
2309                 }
2310                 return ret;
2311         }
2312
2313         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2314             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2315                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2316                                            insert_reserved);
2317         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2318                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2319                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2320                                            insert_reserved);
2321         else
2322                 BUG();
2323         return ret;
2324 }
2325
2326 static noinline struct btrfs_delayed_ref_node *
2327 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2328 {
2329         struct rb_node *node;
2330         struct btrfs_delayed_ref_node *ref, *last = NULL;
2331
2332         /*
2333          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2334          * this prevents ref count from going down to zero when
2335          * there are still pending delayed refs.
2336          */
2337         node = rb_first(&head->ref_root);
2338         while (node) {
2339                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2340                                 rb_node);
2341                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2342                         return ref;
2343                 else if (last == NULL)
2344                         last = ref;
2345                 node = rb_next(node);
2346         }
2347         return last;
2348 }
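
/*
 * E.g. with { DROP(a), ADD(b), DROP(c) } queued on a head, ADD(b) runs
 * first; the drops only run once no adds remain, so the ref count never
 * transiently hits zero while an add is still pending.
 */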
2349
2350 /*
2351  * Returns 0 on success or if called with an already aborted transaction.
2352  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2353  */
2354 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2355                                              struct btrfs_root *root,
2356                                              unsigned long nr)
2357 {
2358         struct btrfs_delayed_ref_root *delayed_refs;
2359         struct btrfs_delayed_ref_node *ref;
2360         struct btrfs_delayed_ref_head *locked_ref = NULL;
2361         struct btrfs_delayed_extent_op *extent_op;
2362         struct btrfs_fs_info *fs_info = root->fs_info;
2363         ktime_t start = ktime_get();
2364         int ret;
2365         unsigned long count = 0;
2366         unsigned long actual_count = 0;
2367         int must_insert_reserved = 0;
2368
2369         delayed_refs = &trans->transaction->delayed_refs;
2370         while (1) {
2371                 if (!locked_ref) {
2372                         if (count >= nr)
2373                                 break;
2374
2375                         spin_lock(&delayed_refs->lock);
2376                         locked_ref = btrfs_select_ref_head(trans);
2377                         if (!locked_ref) {
2378                                 spin_unlock(&delayed_refs->lock);
2379                                 break;
2380                         }
2381
2382                         /* grab the lock that says we are going to process
2383                          * all the refs for this head */
2384                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2385                         spin_unlock(&delayed_refs->lock);
2386                         /*
2387                          * we may have dropped the spin lock to get the head
2388                          * mutex lock, and that might have given someone else
2389                          * time to free the head.  If that's true, it has been
2390                          * removed from our list and we can move on.
2391                          */
2392                         if (ret == -EAGAIN) {
2393                                 locked_ref = NULL;
2394                                 count++;
2395                                 continue;
2396                         }
2397                 }
2398
2399                 /*
2400                  * We need to try and merge add/drops of the same ref since we
2401                  * can run into issues with relocate dropping the implicit ref
2402                  * and then it being added back again before the drop can
2403                  * finish.  If we merged anything we need to re-loop so we can
2404                  * get a good ref.
2405                  */
2406                 spin_lock(&locked_ref->lock);
2407                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2408                                          locked_ref);
2409
2410                 /*
2411                  * locked_ref is the head node, so we have to go one
2412                  * node back for any delayed ref updates
2413                  */
2414                 ref = select_delayed_ref(locked_ref);
2415
2416                 if (ref && ref->seq &&
2417                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2418                         spin_unlock(&locked_ref->lock);
2419                         btrfs_delayed_ref_unlock(locked_ref);
2420                         spin_lock(&delayed_refs->lock);
2421                         locked_ref->processing = 0;
2422                         delayed_refs->num_heads_ready++;
2423                         spin_unlock(&delayed_refs->lock);
2424                         locked_ref = NULL;
2425                         cond_resched();
2426                         count++;
2427                         continue;
2428                 }
2429
2430                 /*
2431                  * record the must insert reserved flag before we
2432                  * drop the spin lock.
2433                  */
2434                 must_insert_reserved = locked_ref->must_insert_reserved;
2435                 locked_ref->must_insert_reserved = 0;
2436
2437                 extent_op = locked_ref->extent_op;
2438                 locked_ref->extent_op = NULL;
2439
2440                 if (!ref) {
2443                         /* All delayed refs have been processed, go ahead
2444                          * and send the head node to run_one_delayed_ref,
2445                          * so that any accounting fixes can happen
2446                          */
2447                         ref = &locked_ref->node;
2448
2449                         if (extent_op && must_insert_reserved) {
2450                                 btrfs_free_delayed_extent_op(extent_op);
2451                                 extent_op = NULL;
2452                         }
2453
2454                         if (extent_op) {
2455                                 spin_unlock(&locked_ref->lock);
2456                                 ret = run_delayed_extent_op(trans, root,
2457                                                             ref, extent_op);
2458                                 btrfs_free_delayed_extent_op(extent_op);
2459
2460                                 if (ret) {
2461                                         /*
2462                                          * Need to reset must_insert_reserved if
2463                                          * there was an error so the abort stuff
2464                                          * can cleanup the reserved space
2465                                          * properly.
2466                                          */
2467                                         if (must_insert_reserved)
2468                                                 locked_ref->must_insert_reserved = 1;
2469                                         locked_ref->processing = 0;
2470                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2471                                         btrfs_delayed_ref_unlock(locked_ref);
2472                                         return ret;
2473                                 }
2474                                 continue;
2475                         }
2476
2477                         /*
2478                          * Need to drop our head ref lock and re-acquire the
2479                          * delayed ref lock and then re-check to make sure
2480                          * nobody got added.
2481                          */
2482                         spin_unlock(&locked_ref->lock);
2483                         spin_lock(&delayed_refs->lock);
2484                         spin_lock(&locked_ref->lock);
2485                         if (rb_first(&locked_ref->ref_root) ||
2486                             locked_ref->extent_op) {
2487                                 spin_unlock(&locked_ref->lock);
2488                                 spin_unlock(&delayed_refs->lock);
2489                                 continue;
2490                         }
2491                         ref->in_tree = 0;
2492                         delayed_refs->num_heads--;
2493                         rb_erase(&locked_ref->href_node,
2494                                  &delayed_refs->href_root);
2495                         spin_unlock(&delayed_refs->lock);
2496                 } else {
2497                         actual_count++;
2498                         ref->in_tree = 0;
2499                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2500                 }
2501                 atomic_dec(&delayed_refs->num_entries);
2502
2503                 if (!btrfs_delayed_ref_is_head(ref)) {
2504                         /*
2505                          * when we play the delayed ref, also correct the
2506                          * ref_mod on head
2507                          */
2508                         switch (ref->action) {
2509                         case BTRFS_ADD_DELAYED_REF:
2510                         case BTRFS_ADD_DELAYED_EXTENT:
2511                                 locked_ref->node.ref_mod -= ref->ref_mod;
2512                                 break;
2513                         case BTRFS_DROP_DELAYED_REF:
2514                                 locked_ref->node.ref_mod += ref->ref_mod;
2515                                 break;
2516                         default:
2517                                 WARN_ON(1);
2518                         }
2519                 }
2520                 spin_unlock(&locked_ref->lock);
2521
2522                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2523                                           must_insert_reserved);
2524
2525                 btrfs_free_delayed_extent_op(extent_op);
2526                 if (ret) {
2527                         locked_ref->processing = 0;
2528                         btrfs_delayed_ref_unlock(locked_ref);
2529                         btrfs_put_delayed_ref(ref);
2530                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2531                         return ret;
2532                 }
2533
2534                 /*
2535                  * If this node is a head, that means all the refs in this head
2536                  * have been dealt with, and we will pick the next head to deal
2537                  * with, so we must unlock the head and drop it from the cluster
2538                  * list before we release it.
2539                  */
2540                 if (btrfs_delayed_ref_is_head(ref)) {
2541                         if (locked_ref->is_data &&
2542                             locked_ref->total_ref_mod < 0) {
2543                                 spin_lock(&delayed_refs->lock);
2544                                 delayed_refs->pending_csums -= ref->num_bytes;
2545                                 spin_unlock(&delayed_refs->lock);
2546                         }
2547                         btrfs_delayed_ref_unlock(locked_ref);
2548                         locked_ref = NULL;
2549                 }
2550                 btrfs_put_delayed_ref(ref);
2551                 count++;
2552                 cond_resched();
2553         }
2554
2555         /*
2556          * We don't want to include ref heads since we can have empty ref heads
2557          * and those will drastically skew our runtime down since we just do
2558          * accounting, no actual extent tree updates.
2559          */
2560         if (actual_count > 0) {
2561                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2562                 u64 avg;
2563
2564                 /*
2565                  * We weigh the current average higher than our current runtime
2566                  * to avoid large swings in the average.
2567                  */
2568                 spin_lock(&delayed_refs->lock);
2569                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2570                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2571                 spin_unlock(&delayed_refs->lock);
2572         }
2573         return 0;
2574 }
2575
2576 #ifdef SCRAMBLE_DELAYED_REFS
2577 /*
2578  * Normally delayed refs get processed in ascending bytenr order. This
2579  * correlates in most cases to the order added. To expose dependencies on this
2580  * order, we start to process the tree in the middle instead of the beginning
2581  */
2582 static u64 find_middle(struct rb_root *root)
2583 {
2584         struct rb_node *n = root->rb_node;
2585         struct btrfs_delayed_ref_node *entry;
2586         int alt = 1;
2587         u64 middle;
2588         u64 first = 0, last = 0;
2589
2590         n = rb_first(root);
2591         if (n) {
2592                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2593                 first = entry->bytenr;
2594         }
2595         n = rb_last(root);
2596         if (n) {
2597                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2598                 last = entry->bytenr;
2599         }
2600         n = root->rb_node;
2601
2602         while (n) {
2603                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2604                 WARN_ON(!entry->in_tree);
2605
2606                 middle = entry->bytenr;
2607
2608                 if (alt)
2609                         n = n->rb_left;
2610                 else
2611                         n = n->rb_right;
2612
2613                 alt = 1 - alt;
2614         }
2615         return middle;
2616 }
2617 #endif
2618
2619 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2620 {
2621         u64 num_bytes;
2622
2623         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2624                              sizeof(struct btrfs_extent_inline_ref));
2625         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2626                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2627
2628         /*
2629          * We don't ever fill up leaves all the way, so the caller doubles this
2630          * estimate to get closer to what we're really going to want to use.
2631          */
2632         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2633 }
2634
2635 /*
2636  * Takes the number of bytes to be checksummed and figures out how many leaves
2637  * would require to store the csums for that many bytes.
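 *
 * E.g. (illustrative, assuming 16K leaves, 4K sectors and 4-byte crc32c
 * csums): one leaf holds roughly 4000 csums, so it covers on the order of
 * 16M of data.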
2638  */
2639 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2640 {
2641         u64 csum_size;
2642         u64 num_csums_per_leaf;
2643         u64 num_csums;
2644
2645         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2646         num_csums_per_leaf = div64_u64(csum_size,
2647                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2648         num_csums = div64_u64(csum_bytes, root->sectorsize);
2649         num_csums += num_csums_per_leaf - 1;
2650         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2651         return num_csums;
2652 }
2653
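/*
 * Estimate whether the global reserve can still absorb the metadata updates
 * the currently queued delayed refs would require.  Returns 1 when the
 * reserve looks too low (callers should throttle), 0 otherwise.
 */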
2654 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2655                                        struct btrfs_root *root)
2656 {
2657         struct btrfs_block_rsv *global_rsv;
2658         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2659         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2660         u64 num_bytes;
2661         int ret = 0;
2662
2663         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2664         num_heads = heads_to_leaves(root, num_heads);
2665         if (num_heads > 1)
2666                 num_bytes += (num_heads - 1) * root->nodesize;
2667         num_bytes <<= 1;
2668         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2669         global_rsv = &root->fs_info->global_block_rsv;
2670
2671         /*
2672          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2673          * wiggle room since running delayed refs can create more delayed refs.
2674          */
2675         if (global_rsv->space_info->full)
2676                 num_bytes <<= 1;
2677
2678         spin_lock(&global_rsv->lock);
2679         if (global_rsv->reserved <= num_bytes)
2680                 ret = 1;
2681         spin_unlock(&global_rsv->lock);
2682         return ret;
2683 }
2684
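/*
 * Decide how urgently the caller should help run delayed refs: returns 1
 * when the estimated time to process the queued entries exceeds one second,
 * 2 when it exceeds half a second, and otherwise falls back to the reserve
 * check above.
 */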
2685 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2686                                        struct btrfs_root *root)
2687 {
2688         struct btrfs_fs_info *fs_info = root->fs_info;
2689         u64 num_entries =
2690                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2691         u64 avg_runtime;
2692         u64 val;
2693
2694         smp_mb();
2695         avg_runtime = fs_info->avg_delayed_ref_runtime;
2696         val = num_entries * avg_runtime;
2697         if (val >= NSEC_PER_SEC)
2698                 return 1;
2699         if (val >= NSEC_PER_SEC / 2)
2700                 return 2;
2701
2702         return btrfs_check_space_for_delayed_refs(trans, root);
2703 }
2704
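/*
 * Context for the async work item below: when 'sync' is set the submitter
 * waits on 'wait' and frees the struct itself, otherwise the worker frees
 * it when it finishes.
 */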
2705 struct async_delayed_refs {
2706         struct btrfs_root *root;
2707         int count;
2708         int error;
2709         int sync;
2710         struct completion wait;
2711         struct btrfs_work work;
2712 };
2713
2714 static void delayed_ref_async_start(struct btrfs_work *work)
2715 {
2716         struct async_delayed_refs *async;
2717         struct btrfs_trans_handle *trans;
2718         int ret;
2719
2720         async = container_of(work, struct async_delayed_refs, work);
2721
2722         trans = btrfs_join_transaction(async->root);
2723         if (IS_ERR(trans)) {
2724                 async->error = PTR_ERR(trans);
2725                 goto done;
2726         }
2727
2728         /*
2729          * trans->sync means that when we call end_transaction, we won't
2730          * wait on delayed refs
2731          */
2732         trans->sync = true;
2733         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2734         if (ret)
2735                 async->error = ret;
2736
2737         ret = btrfs_end_transaction(trans, async->root);
2738         if (ret && !async->error)
2739                 async->error = ret;
2740 done:
2741         if (async->sync)
2742                 complete(&async->wait);
2743         else
2744                 kfree(async);
2745 }
2746
2747 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2748                                  unsigned long count, int wait)
2749 {
2750         struct async_delayed_refs *async;
2751         int ret;
2752
2753         async = kmalloc(sizeof(*async), GFP_NOFS);
2754         if (!async)
2755                 return -ENOMEM;
2756
2757         async->root = root->fs_info->tree_root;
2758         async->count = count;
2759         async->error = 0;
2760         if (wait)
2761                 async->sync = 1;
2762         else
2763                 async->sync = 0;
2764         init_completion(&async->wait);
2765
2766         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2767                         delayed_ref_async_start, NULL, NULL);
2768
2769         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2770
2771         if (wait) {
2772                 wait_for_completion(&async->wait);
2773                 ret = async->error;
2774                 kfree(async);
2775                 return ret;
2776         }
2777         return 0;
2778 }
2779
2780 /*
2781  * this starts processing the delayed reference count updates and
2782  * extent insertions we have queued up so far.  count can be
2783  * 0, which means to process everything in the tree at the start
2784  * of the run (but not newly added entries), or it can be some target
2785  * number you'd like to process.
2786  *
2787  * Returns 0 on success or if called with an aborted transaction
2788  * Returns <0 on error and aborts the transaction
2789  */
2790 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2791                            struct btrfs_root *root, unsigned long count)
2792 {
2793         struct rb_node *node;
2794         struct btrfs_delayed_ref_root *delayed_refs;
2795         struct btrfs_delayed_ref_head *head;
2796         int ret;
2797         int run_all = count == (unsigned long)-1;
2798
2799         /* We'll clean this up in btrfs_cleanup_transaction */
2800         if (trans->aborted)
2801                 return 0;
2802
2803         if (root == root->fs_info->extent_root)
2804                 root = root->fs_info->tree_root;
2805
2806         delayed_refs = &trans->transaction->delayed_refs;
2807         if (count == 0)
2808                 count = atomic_read(&delayed_refs->num_entries) * 2;
2809
2810 again:
2811 #ifdef SCRAMBLE_DELAYED_REFS
2812         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2813 #endif
2814         ret = __btrfs_run_delayed_refs(trans, root, count);
2815         if (ret < 0) {
2816                 btrfs_abort_transaction(trans, root, ret);
2817                 return ret;
2818         }
2819
2820         if (run_all) {
2821                 if (!list_empty(&trans->new_bgs))
2822                         btrfs_create_pending_block_groups(trans, root);
2823
2824                 spin_lock(&delayed_refs->lock);
2825                 node = rb_first(&delayed_refs->href_root);
2826                 if (!node) {
2827                         spin_unlock(&delayed_refs->lock);
2828                         goto out;
2829                 }
2830                 count = (unsigned long)-1;
2831
2832                 while (node) {
2833                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2834                                         href_node);
2835                         if (btrfs_delayed_ref_is_head(&head->node)) {
2836                                 struct btrfs_delayed_ref_node *ref;
2837
2838                                 ref = &head->node;
2839                                 atomic_inc(&ref->refs);
2840
2841                                 spin_unlock(&delayed_refs->lock);
2842                                 /*
2843                                  * Mutex was contended, block until it's
2844                                  * released and try again
2845                                  */
2846                                 mutex_lock(&head->mutex);
2847                                 mutex_unlock(&head->mutex);
2848
2849                                 btrfs_put_delayed_ref(ref);
2850                                 cond_resched();
2851                                 goto again;
2852                         } else {
2853                                 WARN_ON(1);
2854                         }
2855                         node = rb_next(node);
2856                 }
2857                 spin_unlock(&delayed_refs->lock);
2858                 cond_resched();
2859                 goto again;
2860         }
2861 out:
2862         ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2863         if (ret)
2864                 return ret;
2865         assert_qgroups_uptodate(trans);
2866         return 0;
2867 }
2868
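/*
 * Queue a delayed extent op that only updates the flags of an extent item;
 * the extent tree itself is modified later, when the delayed refs are run.
 */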
2869 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2870                                 struct btrfs_root *root,
2871                                 u64 bytenr, u64 num_bytes, u64 flags,
2872                                 int level, int is_data)
2873 {
2874         struct btrfs_delayed_extent_op *extent_op;
2875         int ret;
2876
2877         extent_op = btrfs_alloc_delayed_extent_op();
2878         if (!extent_op)
2879                 return -ENOMEM;
2880
2881         extent_op->flags_to_set = flags;
2882         extent_op->update_flags = 1;
2883         extent_op->update_key = 0;
2884         extent_op->is_data = is_data ? 1 : 0;
2885         extent_op->level = level;
2886
2887         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2888                                           num_bytes, extent_op);
2889         if (ret)
2890                 btrfs_free_delayed_extent_op(extent_op);
2891         return ret;
2892 }
2893
2894 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2895                                       struct btrfs_root *root,
2896                                       struct btrfs_path *path,
2897                                       u64 objectid, u64 offset, u64 bytenr)
2898 {
2899         struct btrfs_delayed_ref_head *head;
2900         struct btrfs_delayed_ref_node *ref;
2901         struct btrfs_delayed_data_ref *data_ref;
2902         struct btrfs_delayed_ref_root *delayed_refs;
2903         struct rb_node *node;
2904         int ret = 0;
2905
2906         delayed_refs = &trans->transaction->delayed_refs;
2907         spin_lock(&delayed_refs->lock);
2908         head = btrfs_find_delayed_ref_head(trans, bytenr);
2909         if (!head) {
2910                 spin_unlock(&delayed_refs->lock);
2911                 return 0;
2912         }
2913
2914         if (!mutex_trylock(&head->mutex)) {
2915                 atomic_inc(&head->node.refs);
2916                 spin_unlock(&delayed_refs->lock);
2917
2918                 btrfs_release_path(path);
2919
2920                 /*
2921                  * Mutex was contended, block until it's released and let
2922                  * caller try again
2923                  */
2924                 mutex_lock(&head->mutex);
2925                 mutex_unlock(&head->mutex);
2926                 btrfs_put_delayed_ref(&head->node);
2927                 return -EAGAIN;
2928         }
2929         spin_unlock(&delayed_refs->lock);
2930
2931         spin_lock(&head->lock);
2932         node = rb_first(&head->ref_root);
2933         while (node) {
2934                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2935                 node = rb_next(node);
2936
2937                 /* If it's a shared ref we know a cross reference exists */
2938                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2939                         ret = 1;
2940                         break;
2941                 }
2942
2943                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2944
2945                 /*
2946                  * If our ref doesn't match the one we're currently looking at
2947                  * then we have a cross reference.
2948                  */
2949                 if (data_ref->root != root->root_key.objectid ||
2950                     data_ref->objectid != objectid ||
2951                     data_ref->offset != offset) {
2952                         ret = 1;
2953                         break;
2954                 }
2955         }
2956         spin_unlock(&head->lock);
2957         mutex_unlock(&head->mutex);
2958         return ret;
2959 }
2960
2961 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2962                                         struct btrfs_root *root,
2963                                         struct btrfs_path *path,
2964                                         u64 objectid, u64 offset, u64 bytenr)
2965 {
2966         struct btrfs_root *extent_root = root->fs_info->extent_root;
2967         struct extent_buffer *leaf;
2968         struct btrfs_extent_data_ref *ref;
2969         struct btrfs_extent_inline_ref *iref;
2970         struct btrfs_extent_item *ei;
2971         struct btrfs_key key;
2972         u32 item_size;
2973         int ret;
2974
2975         key.objectid = bytenr;
2976         key.offset = (u64)-1;
2977         key.type = BTRFS_EXTENT_ITEM_KEY;
2978
2979         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2980         if (ret < 0)
2981                 goto out;
2982         BUG_ON(ret == 0); /* Corruption */
2983
2984         ret = -ENOENT;
2985         if (path->slots[0] == 0)
2986                 goto out;
2987
2988         path->slots[0]--;
2989         leaf = path->nodes[0];
2990         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2991
2992         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2993                 goto out;
2994
2995         ret = 1;
2996         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2997 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2998         if (item_size < sizeof(*ei)) {
2999                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3000                 goto out;
3001         }
3002 #endif
3003         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3004
3005         if (item_size != sizeof(*ei) +
3006             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3007                 goto out;
3008
3009         if (btrfs_extent_generation(leaf, ei) <=
3010             btrfs_root_last_snapshot(&root->root_item))
3011                 goto out;
3012
3013         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3014         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3015             BTRFS_EXTENT_DATA_REF_KEY)
3016                 goto out;
3017
3018         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3019         if (btrfs_extent_refs(leaf, ei) !=
3020             btrfs_extent_data_ref_count(leaf, ref) ||
3021             btrfs_extent_data_ref_root(leaf, ref) !=
3022             root->root_key.objectid ||
3023             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3024             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3025                 goto out;
3026
3027         ret = 0;
3028 out:
3029         return ret;
3030 }
3031
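/*
 * Check whether anyone other than (root, objectid, offset) references the
 * data extent at 'bytenr', consulting both the committed extent tree and
 * any pending delayed refs.  Returns 0 when the extent is not shared and
 * nonzero when it is (or when that couldn't be proven).
 */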
3032 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3033                           struct btrfs_root *root,
3034                           u64 objectid, u64 offset, u64 bytenr)
3035 {
3036         struct btrfs_path *path;
3037         int ret;
3038         int ret2;
3039
3040         path = btrfs_alloc_path();
3041         if (!path)
3042                 return -ENOMEM;
3043
3044         do {
3045                 ret = check_committed_ref(trans, root, path, objectid,
3046                                           offset, bytenr);
3047                 if (ret && ret != -ENOENT)
3048                         goto out;
3049
3050                 ret2 = check_delayed_ref(trans, root, path, objectid,
3051                                          offset, bytenr);
3052         } while (ret2 == -EAGAIN);
3053
3054         if (ret2 && ret2 != -ENOENT) {
3055                 ret = ret2;
3056                 goto out;
3057         }
3058
3059         if (ret != -ENOENT || ret2 != -ENOENT)
3060                 ret = 0;
3061 out:
3062         btrfs_free_path(path);
3063         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3064                 WARN_ON(ret > 0);
3065         return ret;
3066 }
3067
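/*
 * Walk every pointer in 'buf' (file extent items in a leaf, block pointers
 * in a node) and add or drop one reference on each extent it points to,
 * depending on 'inc'.  'full_backref' parents the refs to this block
 * instead of to the owning root.
 */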
3068 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3069                            struct btrfs_root *root,
3070                            struct extent_buffer *buf,
3071                            int full_backref, int inc)
3072 {
3073         u64 bytenr;
3074         u64 num_bytes;
3075         u64 parent;
3076         u64 ref_root;
3077         u32 nritems;
3078         struct btrfs_key key;
3079         struct btrfs_file_extent_item *fi;
3080         int i;
3081         int level;
3082         int ret = 0;
3083         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3084                             u64, u64, u64, u64, u64, u64, int);
3085
3086
3088                 return 0;
3089
3090         ref_root = btrfs_header_owner(buf);
3091         nritems = btrfs_header_nritems(buf);
3092         level = btrfs_header_level(buf);
3093
3094         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3095                 return 0;
3096
3097         if (inc)
3098                 process_func = btrfs_inc_extent_ref;
3099         else
3100                 process_func = btrfs_free_extent;
3101
3102         if (full_backref)
3103                 parent = buf->start;
3104         else
3105                 parent = 0;
3106
3107         for (i = 0; i < nritems; i++) {
3108                 if (level == 0) {
3109                         btrfs_item_key_to_cpu(buf, &key, i);
3110                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3111                                 continue;
3112                         fi = btrfs_item_ptr(buf, i,
3113                                             struct btrfs_file_extent_item);
3114                         if (btrfs_file_extent_type(buf, fi) ==
3115                             BTRFS_FILE_EXTENT_INLINE)
3116                                 continue;
3117                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3118                         if (bytenr == 0)
3119                                 continue;
3120
3121                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3122                         key.offset -= btrfs_file_extent_offset(buf, fi);
3123                         ret = process_func(trans, root, bytenr, num_bytes,
3124                                            parent, ref_root, key.objectid,
3125                                            key.offset, 1);
3126                         if (ret)
3127                                 goto fail;
3128                 } else {
3129                         bytenr = btrfs_node_blockptr(buf, i);
3130                         num_bytes = root->nodesize;
3131                         ret = process_func(trans, root, bytenr, num_bytes,
3132                                            parent, ref_root, level - 1, 0,
3133                                            1);
3134                         if (ret)
3135                                 goto fail;
3136                 }
3137         }
3138         return 0;
3139 fail:
3140         return ret;
3141 }
3142
3143 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3144                   struct extent_buffer *buf, int full_backref)
3145 {
3146         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3147 }
3148
3149 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3150                   struct extent_buffer *buf, int full_backref)
3151 {
3152         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3153 }
3154
3155 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3156                                  struct btrfs_root *root,
3157                                  struct btrfs_path *path,
3158                                  struct btrfs_block_group_cache *cache)
3159 {
3160         int ret;
3161         struct btrfs_root *extent_root = root->fs_info->extent_root;
3162         unsigned long bi;
3163         struct extent_buffer *leaf;
3164
3165         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3166         if (ret) {
3167                 if (ret > 0)
3168                         ret = -ENOENT;
3169                 goto fail;
3170         }
3171
3172         leaf = path->nodes[0];
3173         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3174         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3175         btrfs_mark_buffer_dirty(leaf);
3176         btrfs_release_path(path);
3177 fail:
3178         if (ret)
3179                 btrfs_abort_transaction(trans, root, ret);
3180         return ret;
3181 }
3183
3184 static struct btrfs_block_group_cache *
3185 next_block_group(struct btrfs_root *root,
3186                  struct btrfs_block_group_cache *cache)
3187 {
3188         struct rb_node *node;
3189
3190         spin_lock(&root->fs_info->block_group_cache_lock);
3191
3192         /* If our block group was removed, we need a full search. */
3193         if (RB_EMPTY_NODE(&cache->cache_node)) {
3194                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3195
3196                 spin_unlock(&root->fs_info->block_group_cache_lock);
3197                 btrfs_put_block_group(cache);
3198                 cache = btrfs_lookup_first_block_group(root->fs_info,
3199                                                        next_bytenr);
3200                 return cache;
3201         }
3202         node = rb_next(&cache->cache_node);
3203         btrfs_put_block_group(cache);
3204         if (node) {
3205                 cache = rb_entry(node, struct btrfs_block_group_cache,
3206                                  cache_node);
3207                 btrfs_get_block_group(cache);
3208         } else
3209                 cache = NULL;
3210         spin_unlock(&root->fs_info->block_group_cache_lock);
3211         return cache;
3212 }
3213
3214 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3215                             struct btrfs_trans_handle *trans,
3216                             struct btrfs_path *path)
3217 {
3218         struct btrfs_root *root = block_group->fs_info->tree_root;
3219         struct inode *inode = NULL;
3220         u64 alloc_hint = 0;
3221         int dcs = BTRFS_DC_ERROR;
3222         u64 num_pages = 0;
3223         int retries = 0;
3224         int ret = 0;
3225
3226         /*
3227          * If this block group is smaller than 100 megs, don't bother caching the
3228          * block group.
3229          */
3230         if (block_group->key.offset < (100 * 1024 * 1024)) {
3231                 spin_lock(&block_group->lock);
3232                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3233                 spin_unlock(&block_group->lock);
3234                 return 0;
3235         }
3236
3237         if (trans->aborted)
3238                 return 0;
3239 again:
3240         inode = lookup_free_space_inode(root, block_group, path);
3241         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3242                 ret = PTR_ERR(inode);
3243                 btrfs_release_path(path);
3244                 goto out;
3245         }
3246
3247         if (IS_ERR(inode)) {
3248                 BUG_ON(retries);
3249                 retries++;
3250
3251                 if (block_group->ro)
3252                         goto out_free;
3253
3254                 ret = create_free_space_inode(root, trans, block_group, path);
3255                 if (ret)
3256                         goto out_free;
3257                 goto again;
3258         }
3259
3260         /* We've already set up this transaction, go ahead and exit */
3261         if (block_group->cache_generation == trans->transid &&
3262             i_size_read(inode)) {
3263                 dcs = BTRFS_DC_SETUP;
3264                 goto out_put;
3265         }
3266
3267         /*
3268          * We want to set the generation to 0 so that if anything goes wrong
3269          * from here on out we know not to trust this cache when we load up next
3270          * time.
3271          */
3272         BTRFS_I(inode)->generation = 0;
3273         ret = btrfs_update_inode(trans, root, inode);
3274         if (ret) {
3275                 /*
3276                  * So theoretically we could recover from this, simply set the
3277                  * super cache generation to 0 so we know to invalidate the
3278                  * cache, but then we'd have to keep track of the block groups
3279                  * that fail this way so we know we _have_ to reset this cache
3280                  * before the next commit or risk reading stale cache.  So to
3281                  * limit our exposure to horrible edge cases, let's just abort the
3282                  * transaction; this only happens in really bad situations
3283                  * anyway.
3284                  */
3285                 btrfs_abort_transaction(trans, root, ret);
3286                 goto out_put;
3287         }
3288         WARN_ON(ret);
3289
3290         if (i_size_read(inode) > 0) {
3291                 ret = btrfs_check_trunc_cache_free_space(root,
3292                                         &root->fs_info->global_block_rsv);
3293                 if (ret)
3294                         goto out_put;
3295
3296                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3297                 if (ret)
3298                         goto out_put;
3299         }
3300
3301         spin_lock(&block_group->lock);
3302         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3303             !btrfs_test_opt(root, SPACE_CACHE) ||
3304             block_group->delalloc_bytes) {
3305                 /*
3306                  * don't bother trying to write stuff out _if_
3307                  * a) we're not cached, b) we're mounted with nospace_cache, or
3308                  * c) the block group still has outstanding delalloc bytes.
3309                  */
3310                 dcs = BTRFS_DC_WRITTEN;
3311                 spin_unlock(&block_group->lock);
3312                 goto out_put;
3313         }
3314         spin_unlock(&block_group->lock);
3315
3316         /*
3317          * Try to preallocate enough space based on how big the block group is.
3318          * Keep in mind this has to include any pinned space which could end up
3319          * taking up quite a bit since it's not folded into the other space
3320          * cache.
3321          */
3322         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3323         if (!num_pages)
3324                 num_pages = 1;
3325
3326         num_pages *= 16;
3327         num_pages *= PAGE_CACHE_SIZE;
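        /*
         * E.g. (illustrative): a 1G block group yields 4 * 16 = 64 pages,
         * i.e. 256K of preallocated cache space with 4K pages.
         */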
3328
3329         ret = btrfs_check_data_free_space(inode, num_pages);
3330         if (ret)
3331                 goto out_put;
3332
3333         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3334                                               num_pages, num_pages,
3335                                               &alloc_hint);
3336         if (!ret)
3337                 dcs = BTRFS_DC_SETUP;
3338         btrfs_free_reserved_data_space(inode, num_pages);
3339
3340 out_put:
3341         iput(inode);
3342 out_free:
3343         btrfs_release_path(path);
3344 out:
3345         spin_lock(&block_group->lock);
3346         if (!ret && dcs == BTRFS_DC_SETUP)
3347                 block_group->cache_generation = trans->transid;
3348         block_group->disk_cache_state = dcs;
3349         spin_unlock(&block_group->lock);
3350
3351         return ret;
3352 }
3353
3354 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3355                             struct btrfs_root *root)
3356 {
3357         struct btrfs_block_group_cache *cache, *tmp;
3358         struct btrfs_transaction *cur_trans = trans->transaction;
3359         struct btrfs_path *path;
3360
3361         if (list_empty(&cur_trans->dirty_bgs) ||
3362             !btrfs_test_opt(root, SPACE_CACHE))
3363                 return 0;
3364
3365         path = btrfs_alloc_path();
3366         if (!path)
3367                 return -ENOMEM;
3368
3369         /* Could add new block groups, use _safe just in case */
3370         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3371                                  dirty_list) {
3372                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3373                         cache_save_setup(cache, trans, path);
3374         }
3375
3376         btrfs_free_path(path);
3377         return 0;
3378 }
3379
3380 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3381                                    struct btrfs_root *root)
3382 {
3383         struct btrfs_block_group_cache *cache;
3384         struct btrfs_transaction *cur_trans = trans->transaction;
3385         int ret = 0;
3386         struct btrfs_path *path;
3387
3388         if (list_empty(&cur_trans->dirty_bgs))
3389                 return 0;
3390
3391         path = btrfs_alloc_path();
3392         if (!path)
3393                 return -ENOMEM;
3394
3395         /*
3396          * We don't need the lock here since we are protected by the transaction
3397          * commit.  We want to do the cache_save_setup first and then run the
3398          * delayed refs to make sure we have the best chance at doing this all
3399          * in one shot.
3400          */
3401         while (!list_empty(&cur_trans->dirty_bgs)) {
3402                 cache = list_first_entry(&cur_trans->dirty_bgs,
3403                                          struct btrfs_block_group_cache,
3404                                          dirty_list);
3405                 list_del_init(&cache->dirty_list);
3406                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3407                         cache_save_setup(cache, trans, path);
3408                 if (!ret)
3409                         ret = btrfs_run_delayed_refs(trans, root,
3410                                                      (unsigned long) -1);
3411                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
3412                         btrfs_write_out_cache(root, trans, cache, path);
3413                 if (!ret)
3414                         ret = write_one_cache_group(trans, root, path, cache);
3415                 btrfs_put_block_group(cache);
3416         }
3417
3418         btrfs_free_path(path);
3419         return ret;
3420 }
3421
3422 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3423 {
3424         struct btrfs_block_group_cache *block_group;
3425         int readonly = 0;
3426
3427         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3428         if (!block_group || block_group->ro)
3429                 readonly = 1;
3430         if (block_group)
3431                 btrfs_put_block_group(block_group);
3432         return readonly;
3433 }
3434
3435 static const char *alloc_name(u64 flags)
3436 {
3437         switch (flags) {
3438         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3439                 return "mixed";
3440         case BTRFS_BLOCK_GROUP_METADATA:
3441                 return "metadata";
3442         case BTRFS_BLOCK_GROUP_DATA:
3443                 return "data";
3444         case BTRFS_BLOCK_GROUP_SYSTEM:
3445                 return "system";
3446         default:
3447                 WARN_ON(1);
3448                 return "invalid-combination";
3449         }
3450 }
3451
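/*
 * Find or create the in-memory space_info for 'flags' and fold
 * 'total_bytes'/'bytes_used' into it.  'factor' doubles the disk-space
 * accounting for profiles that keep two copies (DUP/RAID1/RAID10).
 */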
3452 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3453                              u64 total_bytes, u64 bytes_used,
3454                              struct btrfs_space_info **space_info)
3455 {
3456         struct btrfs_space_info *found;
3457         int i;
3458         int factor;
3459         int ret;
3460
3461         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3462                      BTRFS_BLOCK_GROUP_RAID10))
3463                 factor = 2;
3464         else
3465                 factor = 1;
3466
3467         found = __find_space_info(info, flags);
3468         if (found) {
3469                 spin_lock(&found->lock);
3470                 found->total_bytes += total_bytes;
3471                 found->disk_total += total_bytes * factor;
3472                 found->bytes_used += bytes_used;
3473                 found->disk_used += bytes_used * factor;
3474                 found->full = 0;
3475                 spin_unlock(&found->lock);
3476                 *space_info = found;
3477                 return 0;
3478         }
3479         found = kzalloc(sizeof(*found), GFP_NOFS);
3480         if (!found)
3481                 return -ENOMEM;
3482
3483         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3484         if (ret) {
3485                 kfree(found);
3486                 return ret;
3487         }
3488
3489         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3490                 INIT_LIST_HEAD(&found->block_groups[i]);
3491         init_rwsem(&found->groups_sem);
3492         spin_lock_init(&found->lock);
3493         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3494         found->total_bytes = total_bytes;
3495         found->disk_total = total_bytes * factor;
3496         found->bytes_used = bytes_used;
3497         found->disk_used = bytes_used * factor;
3498         found->bytes_pinned = 0;
3499         found->bytes_reserved = 0;
3500         found->bytes_readonly = 0;
3501         found->bytes_may_use = 0;
3502         found->full = 0;
3503         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3504         found->chunk_alloc = 0;
3505         found->flush = 0;
3506         init_waitqueue_head(&found->wait);
3507         INIT_LIST_HEAD(&found->ro_bgs);
3508
3509         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3510                                     info->space_info_kobj, "%s",
3511                                     alloc_name(found->flags));
3512         if (ret) {
3513                 kfree(found);
3514                 return ret;
3515         }
3516
3517         *space_info = found;
3518         list_add_rcu(&found->list, &info->space_info);
3519         if (flags & BTRFS_BLOCK_GROUP_DATA)
3520                 info->data_sinfo = found;
3521
3522         return ret;
3523 }
3524
3525 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3526 {
3527         u64 extra_flags = chunk_to_extended(flags) &
3528                                 BTRFS_EXTENDED_PROFILE_MASK;
3529
3530         write_seqlock(&fs_info->profiles_lock);
3531         if (flags & BTRFS_BLOCK_GROUP_DATA)
3532                 fs_info->avail_data_alloc_bits |= extra_flags;
3533         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3534                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3535         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3536                 fs_info->avail_system_alloc_bits |= extra_flags;
3537         write_sequnlock(&fs_info->profiles_lock);
3538 }
3539
3540 /*
3541  * returns target flags in extended format or 0 if restripe for this
3542  * chunk_type is not in progress
3543  *
3544  * should be called with either volume_mutex or balance_lock held
3545  */
3546 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3547 {
3548         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3549         u64 target = 0;
3550
3551         if (!bctl)
3552                 return 0;
3553
3554         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3555             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3556                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3557         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3558                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3559                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3560         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3561                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3562                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3563         }
3564
3565         return target;
3566 }
3567
3568 /*
3569  * @flags: available profiles in extended format (see ctree.h)
3570  *
3571  * Returns reduced profile in chunk format.  If profile changing is in
3572  * progress (either running or paused) picks the target profile (if it's
3573  * already available), otherwise falls back to plain reducing.
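 *
 * E.g. (illustrative): if both the RAID1 and RAID0 bits survive the
 * device-count mask, only RAID1, the more redundant profile, is kept.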
3574  */
3575 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3576 {
3577         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3578         u64 target;
3579         u64 tmp;
3580
3581         /*
3582          * see if restripe for this chunk_type is in progress, if so
3583          * try to reduce to the target profile
3584          */
3585         spin_lock(&root->fs_info->balance_lock);
3586         target = get_restripe_target(root->fs_info, flags);
3587         if (target) {
3588                 /* pick target profile only if it's already available */
3589                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3590                         spin_unlock(&root->fs_info->balance_lock);
3591                         return extended_to_chunk(target);
3592                 }
3593         }
3594         spin_unlock(&root->fs_info->balance_lock);
3595
3596         /* First, mask out the RAID levels which aren't possible */
3597         if (num_devices == 1)
3598                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3599                            BTRFS_BLOCK_GROUP_RAID5);
3600         if (num_devices < 3)
3601                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3602         if (num_devices < 4)
3603                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3604
3605         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3606                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3607                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3608         flags &= ~tmp;
3609
3610         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3611                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3612         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3613                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3614         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3615                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3616         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3617                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3618         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3619                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3620
3621         return extended_to_chunk(flags | tmp);
3622 }
3623
3624 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3625 {
3626         unsigned seq;
3627         u64 flags;
3628
3629         do {
3630                 flags = orig_flags;
3631                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3632
3633                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3634                         flags |= root->fs_info->avail_data_alloc_bits;
3635                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3636                         flags |= root->fs_info->avail_system_alloc_bits;
3637                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3638                         flags |= root->fs_info->avail_metadata_alloc_bits;
3639         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3640
3641         return btrfs_reduce_alloc_profile(root, flags);
3642 }
3643
3644 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3645 {
3646         u64 flags;
3647         u64 ret;
3648
3649         if (data)
3650                 flags = BTRFS_BLOCK_GROUP_DATA;
3651         else if (root == root->fs_info->chunk_root)
3652                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3653         else
3654                 flags = BTRFS_BLOCK_GROUP_METADATA;
3655
3656         ret = get_alloc_profile(root, flags);
3657         return ret;
3658 }
3659
3660 /*
3661  * This will check the space that the inode allocates from to make sure we have
3662  * enough space for bytes.
3663  */
3664 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3665 {
3666         struct btrfs_space_info *data_sinfo;
3667         struct btrfs_root *root = BTRFS_I(inode)->root;
3668         struct btrfs_fs_info *fs_info = root->fs_info;
3669         u64 used;
3670         int ret = 0, committed = 0;
3671
3672         /* make sure bytes are sectorsize aligned */
3673         bytes = ALIGN(bytes, root->sectorsize);
3674
3675         if (btrfs_is_free_space_inode(inode)) {
3676                 committed = 1;
3677                 ASSERT(current->journal_info);
3678         }
3679
3680         data_sinfo = fs_info->data_sinfo;
3681         if (!data_sinfo)
3682                 goto alloc;
3683
3684 again:
3685         /* make sure we have enough space to handle the data first */
3686         spin_lock(&data_sinfo->lock);
3687         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3688                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3689                 data_sinfo->bytes_may_use;
3690
3691         if (used + bytes > data_sinfo->total_bytes) {
3692                 struct btrfs_trans_handle *trans;
3693
3694                 /*
3695                  * if we don't have enough free bytes in this space then we need
3696                  * to alloc a new chunk.
3697                  */
3698                 if (!data_sinfo->full) {
3699                         u64 alloc_target;
3700
3701                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3702                         spin_unlock(&data_sinfo->lock);
3703 alloc:
3704                         alloc_target = btrfs_get_alloc_profile(root, 1);
3705                         /*
3706                          * It is ugly that we don't call nolock join
3707                          * transaction for the free space inode case here.
3708                          * But it is safe because we only do the data space
3709                          * reservation for the free space cache in the
3710                          * transaction context: the common join transaction
3711                          * just increases the counter of the current transaction
3712                          * handle and doesn't try to acquire the trans_lock of
3713                          * the fs.
3714                          */
3715                         trans = btrfs_join_transaction(root);
3716                         if (IS_ERR(trans))
3717                                 return PTR_ERR(trans);
3718
3719                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3720                                              alloc_target,
3721                                              CHUNK_ALLOC_NO_FORCE);
3722                         btrfs_end_transaction(trans, root);
3723                         if (ret < 0) {
3724                                 if (ret != -ENOSPC)
3725                                         return ret;
3726                                 else
3727                                         goto commit_trans;
3728                         }
3729
3730                         if (!data_sinfo)
3731                                 data_sinfo = fs_info->data_sinfo;
3732
3733                         goto again;
3734                 }
3735
3736                 /*
3737                  * If we don't have enough pinned space to deal with this
3738                  * allocation, don't bother committing the transaction.
3739                  */
3740                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3741                                            bytes) < 0)
3742                         committed = 1;
3743                 spin_unlock(&data_sinfo->lock);
3744
3745                 /* commit the current transaction and try again */
3746 commit_trans:
3747                 if (!committed &&
3748                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3749                         committed = 1;
3750
3751                         trans = btrfs_join_transaction(root);
3752                         if (IS_ERR(trans))
3753                                 return PTR_ERR(trans);
3754                         ret = btrfs_commit_transaction(trans, root);
3755                         if (ret)
3756                                 return ret;
3757                         goto again;
3758                 }
3759
3760                 trace_btrfs_space_reservation(root->fs_info,
3761                                               "space_info:enospc",
3762                                               data_sinfo->flags, bytes, 1);
3763                 return -ENOSPC;
3764         }
3765         data_sinfo->bytes_may_use += bytes;
3766         trace_btrfs_space_reservation(root->fs_info, "space_info",
3767                                       data_sinfo->flags, bytes, 1);
3768         spin_unlock(&data_sinfo->lock);
3769
3770         return 0;
3771 }
3772
3773 /*
3774  * Called if we need to clear a data reservation for this inode.
3775  */
3776 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3777 {
3778         struct btrfs_root *root = BTRFS_I(inode)->root;
3779         struct btrfs_space_info *data_sinfo;
3780
3781         /* make sure bytes are sectorsize aligned */
3782         bytes = ALIGN(bytes, root->sectorsize);
3783
3784         data_sinfo = root->fs_info->data_sinfo;
3785         spin_lock(&data_sinfo->lock);
3786         WARN_ON(data_sinfo->bytes_may_use < bytes);
3787         data_sinfo->bytes_may_use -= bytes;
3788         trace_btrfs_space_reservation(root->fs_info, "space_info",
3789                                       data_sinfo->flags, bytes, 0);
3790         spin_unlock(&data_sinfo->lock);
3791 }
3792
3793 static void force_metadata_allocation(struct btrfs_fs_info *info)
3794 {
3795         struct list_head *head = &info->space_info;
3796         struct btrfs_space_info *found;
3797
3798         rcu_read_lock();
3799         list_for_each_entry_rcu(found, head, list) {
3800                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3801                         found->force_alloc = CHUNK_ALLOC_FORCE;
3802         }
3803         rcu_read_unlock();
3804 }
3805
3806 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3807 {
3808         return (global->size << 1);
3809 }
3810
3811 static int should_alloc_chunk(struct btrfs_root *root,
3812                               struct btrfs_space_info *sinfo, int force)
3813 {
3814         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3815         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3816         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3817         u64 thresh;
3818
3819         if (force == CHUNK_ALLOC_FORCE)
3820                 return 1;
3821
3822         /*
3823          * We need to take into account the global rsv because for all intents
3824          * and purposes it's used space.  Don't worry about locking the
3825          * global_rsv, it doesn't change except when the transaction commits.
3826          */
3827         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3828                 num_allocated += calc_global_rsv_need_space(global_rsv);
3829
3830         /*
3831          * in limited mode, we want to have some free space up to
3832          * about 1% of the FS size.
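         * E.g. (illustrative): on a 1T filesystem this works out to
         * max(64M, ~10G) = ~10G of free space.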
3833          */
3834         if (force == CHUNK_ALLOC_LIMITED) {
3835                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3836                 thresh = max_t(u64, 64 * 1024 * 1024,
3837                                div_factor_fine(thresh, 1));
3838
3839                 if (num_bytes - num_allocated < thresh)
3840                         return 1;
3841         }
3842
3843         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3844                 return 0;
3845         return 1;
3846 }
3847
3848 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3849 {
3850         u64 num_dev;
3851
3852         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3853                     BTRFS_BLOCK_GROUP_RAID0 |
3854                     BTRFS_BLOCK_GROUP_RAID5 |
3855                     BTRFS_BLOCK_GROUP_RAID6))
3856                 num_dev = root->fs_info->fs_devices->rw_devices;
3857         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3858                 num_dev = 2;
3859         else
3860                 num_dev = 1;    /* DUP or single */
3861
3862         /* metadata for updating devices and the chunk tree */
3863         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3864 }
3865
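/*
 * Make sure the SYSTEM space has room for the device and chunk tree updates
 * that allocating a chunk of 'type' will require, and allocate a new SYSTEM
 * chunk up front if it doesn't.
 */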
3866 static void check_system_chunk(struct btrfs_trans_handle *trans,
3867                                struct btrfs_root *root, u64 type)
3868 {
3869         struct btrfs_space_info *info;
3870         u64 left;
3871         u64 thresh;
3872
3873         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3874         spin_lock(&info->lock);
3875         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3876                 info->bytes_reserved - info->bytes_readonly;
3877         spin_unlock(&info->lock);
3878
3879         thresh = get_system_chunk_thresh(root, type);
3880         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3881                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3882                         left, thresh, type);
3883                 dump_space_info(info, 0, 0);
3884         }
3885
3886         if (left < thresh) {
3887                 u64 flags;
3888
3889                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3890                 btrfs_alloc_chunk(trans, root, flags);
3891         }
3892 }
3893
3894 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3895                           struct btrfs_root *extent_root, u64 flags, int force)
3896 {
3897         struct btrfs_space_info *space_info;
3898         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3899         int wait_for_alloc = 0;
3900         int ret = 0;
3901
3902         /* Don't re-enter if we're already allocating a chunk */
3903         if (trans->allocating_chunk)
3904                 return -ENOSPC;
3905
3906         space_info = __find_space_info(extent_root->fs_info, flags);
3907         if (!space_info) {
3908                 ret = update_space_info(extent_root->fs_info, flags,
3909                                         0, 0, &space_info);
3910                 BUG_ON(ret); /* -ENOMEM */
3911         }
3912         BUG_ON(!space_info); /* Logic error */
3913
3914 again:
3915         spin_lock(&space_info->lock);
3916         if (force < space_info->force_alloc)
3917                 force = space_info->force_alloc;
3918         if (space_info->full) {
3919                 if (should_alloc_chunk(extent_root, space_info, force))
3920                         ret = -ENOSPC;
3921                 else
3922                         ret = 0;
3923                 spin_unlock(&space_info->lock);
3924                 return ret;
3925         }
3926
3927         if (!should_alloc_chunk(extent_root, space_info, force)) {
3928                 spin_unlock(&space_info->lock);
3929                 return 0;
3930         } else if (space_info->chunk_alloc) {
3931                 wait_for_alloc = 1;
3932         } else {
3933                 space_info->chunk_alloc = 1;
3934         }
3935
3936         spin_unlock(&space_info->lock);
3937
3938         mutex_lock(&fs_info->chunk_mutex);
3939
3940         /*
3941          * The chunk_mutex is held throughout the entirety of a chunk
3942          * allocation, so once we've acquired the chunk_mutex we know that the
3943          * other guy is done and we need to recheck and see if we should
3944          * allocate.
3945          */
3946         if (wait_for_alloc) {
3947                 mutex_unlock(&fs_info->chunk_mutex);
3948                 wait_for_alloc = 0;
3949                 goto again;
3950         }
3951
3952         trans->allocating_chunk = true;
3953
3954         /*
3955          * If we have mixed data/metadata chunks we want to make sure we keep
3956          * allocating mixed chunks instead of individual chunks.
3957          */
3958         if (btrfs_mixed_space_info(space_info))
3959                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3960
3961         /*
3962          * if we're doing a data chunk, go ahead and make sure that
3963          * we keep a reasonable number of metadata chunks allocated in the
3964          * FS as well.
3965          */
3966         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3967                 fs_info->data_chunk_allocations++;
3968                 if (!(fs_info->data_chunk_allocations %
3969                       fs_info->metadata_ratio))
3970                         force_metadata_allocation(fs_info);
3971         }
3972
3973         /*
3974          * Check if we have enough space in SYSTEM chunk because we may need
3975          * to update devices.
3976          */
3977         check_system_chunk(trans, extent_root, flags);
3978
3979         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3980         trans->allocating_chunk = false;
3981
3982         spin_lock(&space_info->lock);
3983         if (ret < 0 && ret != -ENOSPC)
3984                 goto out;
3985         if (ret)
3986                 space_info->full = 1;
3987         else
3988                 ret = 1;
3989
3990         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3991 out:
3992         space_info->chunk_alloc = 0;
3993         spin_unlock(&space_info->lock);
3994         mutex_unlock(&fs_info->chunk_mutex);
3995         return ret;
3996 }
3997
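/*
 * Decide whether a reservation of 'bytes' may exceed the space already
 * allocated to chunks, based on the unallocated device space that's left
 * and on how aggressively the caller is willing to flush.
 */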
3998 static int can_overcommit(struct btrfs_root *root,
3999                           struct btrfs_space_info *space_info, u64 bytes,
4000                           enum btrfs_reserve_flush_enum flush)
4001 {
4002         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4003         u64 profile = btrfs_get_alloc_profile(root, 0);
4004         u64 space_size;
4005         u64 avail;
4006         u64 used;
4007
4008         used = space_info->bytes_used + space_info->bytes_reserved +
4009                 space_info->bytes_pinned + space_info->bytes_readonly;
4010
4011         /*
4012          * We only want to allow overcommitting if we have lots of actual space
4013          * free, but if we don't have enough space to handle the global reserve
4014          * space then we could end up having a real enospc problem when trying
4015          * to allocate a chunk or some other such important allocation.
4016          */
4017         spin_lock(&global_rsv->lock);
4018         space_size = calc_global_rsv_need_space(global_rsv);
4019         spin_unlock(&global_rsv->lock);
4020         if (used + space_size >= space_info->total_bytes)
4021                 return 0;
4022
4023         used += space_info->bytes_may_use;
4024
4025         spin_lock(&root->fs_info->free_chunk_lock);
4026         avail = root->fs_info->free_chunk_space;
4027         spin_unlock(&root->fs_info->free_chunk_lock);
4028
4029         /*
4030          * If we have dup, raid1 or raid10 then only half of the free
4031          * space is actually usable.  For raid56, the space info used
4032          * doesn't include the parity drive, so we don't have to
4033          * change the math.
4034          */
4035         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4036                        BTRFS_BLOCK_GROUP_RAID1 |
4037                        BTRFS_BLOCK_GROUP_RAID10))
4038                 avail >>= 1;
4039
4040         /*
4041          * If we aren't flushing all things, let us overcommit up to
4042          * half of the space. If we can flush, don't let us overcommit
4043          * too much; only allow overcommitting up to 1/8 of the space.
4044          */
4045         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4046                 avail >>= 3;
4047         else
4048                 avail >>= 1;
4049
4050         if (used + bytes < space_info->total_bytes + avail)
4051                 return 1;
4052         return 0;
4053 }
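
/*
 * Worked example of the math above (illustrative numbers, not from the
 * original source): suppose free_chunk_space is 8GiB and the metadata
 * profile is RAID1.  The mirroring halves avail to 4GiB, and with
 * BTRFS_RESERVE_FLUSH_ALL only 1/8 of that may be overcommitted:
 *
 *	avail = 8ULL << 30;
 *	avail >>= 1;	// 4GiB usable under RAID1
 *	avail >>= 3;	// 512MiB of allowed overcommit
 *
 * so a reservation succeeds while used + bytes stays below
 * total_bytes + 512MiB.
 */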
4054
4055 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4056                                          unsigned long nr_pages, int nr_items)
4057 {
4058         struct super_block *sb = root->fs_info->sb;
4059
4060         if (down_read_trylock(&sb->s_umount)) {
4061                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4062                 up_read(&sb->s_umount);
4063         } else {
4064                 /*
4065                  * We needn't worry about the filesystem going from r/w to r/o
4066                  * even though we don't acquire the ->s_umount mutex: the
4067                  * filesystem should guarantee that its delalloc inode list is
4068                  * empty once it is read-only (all dirty pages have been
4069                  * written to disk).
4070                  */
4071                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4072                 if (!current->journal_info)
4073                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4074         }
4075 }
4076
4077 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4078 {
4079         u64 bytes;
4080         int nr;
4081
4082         bytes = btrfs_calc_trans_metadata_size(root, 1);
4083         nr = (int)div64_u64(to_reclaim, bytes);
4084         if (!nr)
4085                 nr = 1;
4086         return nr;
4087 }
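
/*
 * Worked example (assuming a 16KiB nodesize, where
 * btrfs_calc_trans_metadata_size(root, 1) comes to
 * 16KiB * BTRFS_MAX_LEVEL * 2 = 256KiB per item):
 *
 *	calc_reclaim_items_nr(root, 1MiB)   -> 4 items
 *	calc_reclaim_items_nr(root, 100KiB) -> 0, clamped to 1 item
 */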
4088
4089 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4090
4091 /*
4092  * shrink metadata reservation for delalloc
4093  */
4094 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4095                             bool wait_ordered)
4096 {
4097         struct btrfs_block_rsv *block_rsv;
4098         struct btrfs_space_info *space_info;
4099         struct btrfs_trans_handle *trans;
4100         u64 delalloc_bytes;
4101         u64 max_reclaim;
4102         long time_left;
4103         unsigned long nr_pages;
4104         int loops;
4105         int items;
4106         enum btrfs_reserve_flush_enum flush;
4107
4108         /* Calc the number of pages we need to flush for the space reservation */
4109         items = calc_reclaim_items_nr(root, to_reclaim);
4110         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4111
4112         trans = (struct btrfs_trans_handle *)current->journal_info;
4113         block_rsv = &root->fs_info->delalloc_block_rsv;
4114         space_info = block_rsv->space_info;
4115
4116         delalloc_bytes = percpu_counter_sum_positive(
4117                                                 &root->fs_info->delalloc_bytes);
4118         if (delalloc_bytes == 0) {
4119                 if (trans)
4120                         return;
4121                 if (wait_ordered)
4122                         btrfs_wait_ordered_roots(root->fs_info, items);
4123                 return;
4124         }
4125
4126         loops = 0;
4127         while (delalloc_bytes && loops < 3) {
4128                 max_reclaim = min(delalloc_bytes, to_reclaim);
4129                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4130                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4131                 /*
4132                  * We need to wait for the async pages to actually start before
4133                  * we do anything.
4134                  */
4135                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4136                 if (!max_reclaim)
4137                         goto skip_async;
4138
4139                 if (max_reclaim <= nr_pages)
4140                         max_reclaim = 0;
4141                 else
4142                         max_reclaim -= nr_pages;
4143
4144                 wait_event(root->fs_info->async_submit_wait,
4145                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4146                            (int)max_reclaim);
4147 skip_async:
4148                 if (!trans)
4149                         flush = BTRFS_RESERVE_FLUSH_ALL;
4150                 else
4151                         flush = BTRFS_RESERVE_NO_FLUSH;
4152                 spin_lock(&space_info->lock);
4153                 if (can_overcommit(root, space_info, orig, flush)) {
4154                         spin_unlock(&space_info->lock);
4155                         break;
4156                 }
4157                 spin_unlock(&space_info->lock);
4158
4159                 loops++;
4160                 if (wait_ordered && !trans) {
4161                         btrfs_wait_ordered_roots(root->fs_info, items);
4162                 } else {
4163                         time_left = schedule_timeout_killable(1);
4164                         if (time_left)
4165                                 break;
4166                 }
4167                 delalloc_bytes = percpu_counter_sum_positive(
4168                                                 &root->fs_info->delalloc_bytes);
4169         }
4170 }
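
/*
 * Illustrative walk through one iteration (assumed numbers): for
 * to_reclaim = 1MiB, items = 4 and to_reclaim is rounded to
 * 4 * EXTENT_SIZE_PER_ITEM = 1MiB.  With 4KiB pages that queues
 * nr_pages = 1MiB >> PAGE_CACHE_SHIFT = 256 pages for writeback,
 * after which we wait for the async delalloc work to drain and
 * re-check can_overcommit() against the original reservation.
 */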
4171
4172 /**
4173  * may_commit_transaction - possibly commit the transaction if it's ok to
4174  * @root - the root we're allocating for
4175  * @bytes - the number of bytes we want to reserve
4176  * @force - force the commit
4177  *
4178  * This will check to make sure that committing the transaction will actually
4179  * get us somewhere and then commit the transaction if it does.  Otherwise it
4180  * will return -ENOSPC.
4181  */
4182 static int may_commit_transaction(struct btrfs_root *root,
4183                                   struct btrfs_space_info *space_info,
4184                                   u64 bytes, int force)
4185 {
4186         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4187         struct btrfs_trans_handle *trans;
4188
4189         trans = (struct btrfs_trans_handle *)current->journal_info;
4190         if (trans)
4191                 return -EAGAIN;
4192
4193         if (force)
4194                 goto commit;
4195
4196         /* See if there is enough pinned space to make this reservation */
4197         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4198                                    bytes) >= 0)
4199                 goto commit;
4200
4201         /*
4202          * See if committing would also release enough space from the delayed
4203          * insertion reservation to satisfy this reservation.
4204          */
4205         if (space_info != delayed_rsv->space_info)
4206                 return -ENOSPC;
4207
4208         spin_lock(&delayed_rsv->lock);
4209         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4210                                    bytes - delayed_rsv->size) < 0) {
4211                 spin_unlock(&delayed_rsv->lock);
4212                 return -ENOSPC;
4213         }
4214         spin_unlock(&delayed_rsv->lock);
4215
4216 commit:
4217         trans = btrfs_join_transaction(root);
4218         if (IS_ERR(trans))
4219                 return -ENOSPC;
4220
4221         return btrfs_commit_transaction(trans, root);
4222 }
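
/*
 * Decision sketch with assumed numbers: for a 4MiB reservation, 5MiB
 * of total_bytes_pinned lets us commit right away, since the pinned
 * space becomes free again after the commit.  With only 3MiB pinned
 * but 2MiB sitting in the delayed_block_rsv that a commit would also
 * release, 3MiB >= 4MiB - 2MiB still justifies the commit; anything
 * less and we return -ENOSPC without forcing one.
 */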
4223
4224 enum flush_state {
4225         FLUSH_DELAYED_ITEMS_NR  =       1,
4226         FLUSH_DELAYED_ITEMS     =       2,
4227         FLUSH_DELALLOC          =       3,
4228         FLUSH_DELALLOC_WAIT     =       4,
4229         ALLOC_CHUNK             =       5,
4230         COMMIT_TRANS            =       6,
4231 };
4232
4233 static int flush_space(struct btrfs_root *root,
4234                        struct btrfs_space_info *space_info, u64 num_bytes,
4235                        u64 orig_bytes, int state)
4236 {
4237         struct btrfs_trans_handle *trans;
4238         int nr;
4239         int ret = 0;
4240
4241         switch (state) {
4242         case FLUSH_DELAYED_ITEMS_NR:
4243         case FLUSH_DELAYED_ITEMS:
4244                 if (state == FLUSH_DELAYED_ITEMS_NR)
4245                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4246                 else
4247                         nr = -1;
4248
4249                 trans = btrfs_join_transaction(root);
4250                 if (IS_ERR(trans)) {
4251                         ret = PTR_ERR(trans);
4252                         break;
4253                 }
4254                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4255                 btrfs_end_transaction(trans, root);
4256                 break;
4257         case FLUSH_DELALLOC:
4258         case FLUSH_DELALLOC_WAIT:
4259                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4260                                 state == FLUSH_DELALLOC_WAIT);
4261                 break;
4262         case ALLOC_CHUNK:
4263                 trans = btrfs_join_transaction(root);
4264                 if (IS_ERR(trans)) {
4265                         ret = PTR_ERR(trans);
4266                         break;
4267                 }
4268                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4269                                      btrfs_get_alloc_profile(root, 0),
4270                                      CHUNK_ALLOC_NO_FORCE);
4271                 btrfs_end_transaction(trans, root);
4272                 if (ret == -ENOSPC)
4273                         ret = 0;
4274                 break;
4275         case COMMIT_TRANS:
4276                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4277                 break;
4278         default:
4279                 ret = -ENOSPC;
4280                 break;
4281         }
4282
4283         return ret;
4284 }
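
/*
 * The states above form an escalation ladder: callers start cheap and
 * walk toward a full commit, e.g. (sketch; reservation_still_short()
 * is a hypothetical stand-in for the caller's own re-check):
 *
 *	int state = FLUSH_DELAYED_ITEMS_NR;
 *
 *	while (state <= COMMIT_TRANS && reservation_still_short()) {
 *		flush_space(root, space_info, to_reclaim, orig, state);
 *		state++;
 *	}
 *
 * reserve_metadata_bytes() below implements exactly this pattern with
 * its "goto again" loop.
 */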
4285
4286 static inline u64
4287 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4288                                  struct btrfs_space_info *space_info)
4289 {
4290         u64 used;
4291         u64 expected;
4292         u64 to_reclaim;
4293
4294         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4295                                 16 * 1024 * 1024);
4296         spin_lock(&space_info->lock);
4297         if (can_overcommit(root, space_info, to_reclaim,
4298                            BTRFS_RESERVE_FLUSH_ALL)) {
4299                 to_reclaim = 0;
4300                 goto out;
4301         }
4302
4303         used = space_info->bytes_used + space_info->bytes_reserved +
4304                space_info->bytes_pinned + space_info->bytes_readonly +
4305                space_info->bytes_may_use;
4306         if (can_overcommit(root, space_info, 1024 * 1024,
4307                            BTRFS_RESERVE_FLUSH_ALL))
4308                 expected = div_factor_fine(space_info->total_bytes, 95);
4309         else
4310                 expected = div_factor_fine(space_info->total_bytes, 90);
4311
4312         if (used > expected)
4313                 to_reclaim = used - expected;
4314         else
4315                 to_reclaim = 0;
4316         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4317                                      space_info->bytes_reserved);
4318 out:
4319         spin_unlock(&space_info->lock);
4320
4321         return to_reclaim;
4322 }
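
/*
 * Worked example (assumed numbers): on an 8-CPU box the starting
 * target is min(8 * 1MiB, 16MiB) = 8MiB.  If we cannot overcommit
 * even 1MiB, expected = 90% of total_bytes; with total_bytes = 10GiB
 * and used = 9.5GiB that yields to_reclaim = 0.5GiB, further clamped
 * to bytes_may_use + bytes_reserved.
 */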
4323
4324 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4325                                         struct btrfs_fs_info *fs_info, u64 used)
4326 {
4327         return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4328                 !btrfs_fs_closing(fs_info) &&
4329                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4330 }
4331
4332 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4333                                        struct btrfs_fs_info *fs_info,
4334                                        int flush_state)
4335 {
4336         u64 used;
4337
4338         spin_lock(&space_info->lock);
4339         /*
4340          * We ran out of space and have not gotten any free space via
4341          * flush_space, so don't bother doing async reclaim.
4342          */
4343         if (flush_state > COMMIT_TRANS && space_info->full) {
4344                 spin_unlock(&space_info->lock);
4345                 return 0;
4346         }
4347
4348         used = space_info->bytes_used + space_info->bytes_reserved +
4349                space_info->bytes_pinned + space_info->bytes_readonly +
4350                space_info->bytes_may_use;
4351         if (need_do_async_reclaim(space_info, fs_info, used)) {
4352                 spin_unlock(&space_info->lock);
4353                 return 1;
4354         }
4355         spin_unlock(&space_info->lock);
4356
4357         return 0;
4358 }
4359
4360 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4361 {
4362         struct btrfs_fs_info *fs_info;
4363         struct btrfs_space_info *space_info;
4364         u64 to_reclaim;
4365         int flush_state;
4366
4367         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4368         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4369
4370         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4371                                                       space_info);
4372         if (!to_reclaim)
4373                 return;
4374
4375         flush_state = FLUSH_DELAYED_ITEMS_NR;
4376         do {
4377                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4378                             to_reclaim, flush_state);
4379                 flush_state++;
4380                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4381                                                  flush_state))
4382                         return;
4383         } while (flush_state <= COMMIT_TRANS);
4384
4385         if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4386                 queue_work(system_unbound_wq, work);
4387 }
4388
4389 void btrfs_init_async_reclaim_work(struct work_struct *work)
4390 {
4391         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4392 }
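
/*
 * Usage sketch (hypothetical mount-time caller): the work item is
 * initialized once and then queued from reserve_metadata_bytes()
 * whenever need_do_async_reclaim() fires:
 *
 *	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
 *	...
 *	if (!work_busy(&fs_info->async_reclaim_work))
 *		queue_work(system_unbound_wq,
 *			   &fs_info->async_reclaim_work);
 */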
4393
4394 /**
4395  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4396  * @root - the root we're allocating for
4397  * @block_rsv - the block_rsv we're allocating for
4398  * @orig_bytes - the number of bytes we want
4399  * @flush - whether or not we can flush to make our reservation
4400  *
4401  * This will reserve orig_bytes number of bytes from the space info associated
4402  * with the block_rsv.  If there is not enough space it will make an attempt to
4403  * flush out space to make room.  It will do this by flushing delalloc if
4404  * possible or committing the transaction.  If flush is
4405  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
4406  * made and this will fail if there is not enough space already.
4407  */
4408 static int reserve_metadata_bytes(struct btrfs_root *root,
4409                                   struct btrfs_block_rsv *block_rsv,
4410                                   u64 orig_bytes,
4411                                   enum btrfs_reserve_flush_enum flush)
4412 {
4413         struct btrfs_space_info *space_info = block_rsv->space_info;
4414         u64 used;
4415         u64 num_bytes = orig_bytes;
4416         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4417         int ret = 0;
4418         bool flushing = false;
4419
4420 again:
4421         ret = 0;
4422         spin_lock(&space_info->lock);
4423         /*
4424          * We only want to wait if somebody other than us is flushing and we
4425          * are actually allowed to flush all things.
4426          */
4427         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4428                space_info->flush) {
4429                 spin_unlock(&space_info->lock);
4430                 /*
4431                  * If we have a trans handle we can't wait because the flusher
4432                  * may have to commit the transaction, which would mean we would
4433                  * deadlock since we are waiting for the flusher to finish, but
4434                  * hold the current transaction open.
4435                  */
4436                 if (current->journal_info)
4437                         return -EAGAIN;
4438                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4439                 /* Must have been killed, return */
4440                 if (ret)
4441                         return -EINTR;
4442
4443                 spin_lock(&space_info->lock);
4444         }
4445
4446         ret = -ENOSPC;
4447         used = space_info->bytes_used + space_info->bytes_reserved +
4448                 space_info->bytes_pinned + space_info->bytes_readonly +
4449                 space_info->bytes_may_use;
4450
4451         /*
4452          * The idea here is that if we've not already over-reserved the block
4453          * group then we can go ahead and save our reservation first and then
4454          * start flushing if we need to.  Otherwise if we've already
4455          * overcommitted, let's start flushing stuff first and then come back
4456          * and try to make our reservation.
4457          */
4458         if (used <= space_info->total_bytes) {
4459                 if (used + orig_bytes <= space_info->total_bytes) {
4460                         space_info->bytes_may_use += orig_bytes;
4461                         trace_btrfs_space_reservation(root->fs_info,
4462                                 "space_info", space_info->flags, orig_bytes, 1);
4463                         ret = 0;
4464                 } else {
4465                         /*
4466                          * Ok set num_bytes to orig_bytes since we aren't
4467                          * overcommitted, this way we only try and reclaim what
4468                          * we need.
4469                          */
4470                         num_bytes = orig_bytes;
4471                 }
4472         } else {
4473                 /*
4474                  * Ok we're overcommitted, set num_bytes to the overcommitted
4475                  * amount plus the amount of bytes that we need for this
4476                  * reservation.
4477                  */
4478                 num_bytes = used - space_info->total_bytes +
4479                         (orig_bytes * 2);
4480         }
4481
4482         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4483                 space_info->bytes_may_use += orig_bytes;
4484                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4485                                               space_info->flags, orig_bytes,
4486                                               1);
4487                 ret = 0;
4488         }
4489
4490         /*
4491          * Couldn't make our reservation, save our place so while we're trying
4492          * to reclaim space we can actually use it instead of somebody else
4493          * stealing it from us.
4494          *
4495          * We make the other tasks wait for the flush only when we can flush
4496          * all things.
4497          */
4498         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4499                 flushing = true;
4500                 space_info->flush = 1;
4501         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4502                 used += orig_bytes;
4503                 /*
4504                  * We will do the space reservation dance during log replay,
4505                  * which means we won't have fs_info->fs_root set, so don't do
4506                  * the async reclaim as we will panic.
4507                  */
4508                 if (!root->fs_info->log_root_recovering &&
4509                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4510                     !work_busy(&root->fs_info->async_reclaim_work))
4511                         queue_work(system_unbound_wq,
4512                                    &root->fs_info->async_reclaim_work);
4513         }
4514         spin_unlock(&space_info->lock);
4515
4516         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4517                 goto out;
4518
4519         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4520                           flush_state);
4521         flush_state++;
4522
4523         /*
4524          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4525          * would happen.  So skip the delalloc flush.
4526          */
4527         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4528             (flush_state == FLUSH_DELALLOC ||
4529              flush_state == FLUSH_DELALLOC_WAIT))
4530                 flush_state = ALLOC_CHUNK;
4531
4532         if (!ret)
4533                 goto again;
4534         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4535                  flush_state < COMMIT_TRANS)
4536                 goto again;
4537         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4538                  flush_state <= COMMIT_TRANS)
4539                 goto again;
4540
4541 out:
4542         if (ret == -ENOSPC &&
4543             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4544                 struct btrfs_block_rsv *global_rsv =
4545                         &root->fs_info->global_block_rsv;
4546
4547                 if (block_rsv != global_rsv &&
4548                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4549                         ret = 0;
4550         }
4551         if (ret == -ENOSPC)
4552                 trace_btrfs_space_reservation(root->fs_info,
4553                                               "space_info:enospc",
4554                                               space_info->flags, orig_bytes, 1);
4555         if (flushing) {
4556                 spin_lock(&space_info->lock);
4557                 space_info->flush = 0;
4558                 wake_up_all(&space_info->wait);
4559                 spin_unlock(&space_info->lock);
4560         }
4561         return ret;
4562 }
4563
4564 static struct btrfs_block_rsv *get_block_rsv(
4565                                         const struct btrfs_trans_handle *trans,
4566                                         const struct btrfs_root *root)
4567 {
4568         struct btrfs_block_rsv *block_rsv = NULL;
4569
4570         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4571                 block_rsv = trans->block_rsv;
4572
4573         if (root == root->fs_info->csum_root && trans->adding_csums)
4574                 block_rsv = trans->block_rsv;
4575
4576         if (root == root->fs_info->uuid_root)
4577                 block_rsv = trans->block_rsv;
4578
4579         if (!block_rsv)
4580                 block_rsv = root->block_rsv;
4581
4582         if (!block_rsv)
4583                 block_rsv = &root->fs_info->empty_block_rsv;
4584
4585         return block_rsv;
4586 }
4587
4588 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4589                                u64 num_bytes)
4590 {
4591         int ret = -ENOSPC;
4592         spin_lock(&block_rsv->lock);
4593         if (block_rsv->reserved >= num_bytes) {
4594                 block_rsv->reserved -= num_bytes;
4595                 if (block_rsv->reserved < block_rsv->size)
4596                         block_rsv->full = 0;
4597                 ret = 0;
4598         }
4599         spin_unlock(&block_rsv->lock);
4600         return ret;
4601 }
4602
4603 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4604                                 u64 num_bytes, int update_size)
4605 {
4606         spin_lock(&block_rsv->lock);
4607         block_rsv->reserved += num_bytes;
4608         if (update_size)
4609                 block_rsv->size += num_bytes;
4610         else if (block_rsv->reserved >= block_rsv->size)
4611                 block_rsv->full = 1;
4612         spin_unlock(&block_rsv->lock);
4613 }
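
/*
 * Example of the reserved/size interplay (assumed numbers): an rsv
 * with size = 1MiB and reserved = 768KiB that gains 256KiB via
 * block_rsv_add_bytes(rsv, 256KiB, 0) reaches reserved == size, so
 * full is set.  With update_size = 1 the size grows to 1.25MiB
 * instead and the rsv stays short by 256KiB.
 */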
4614
4615 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4616                              struct btrfs_block_rsv *dest, u64 num_bytes,
4617                              int min_factor)
4618 {
4619         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4620         u64 min_bytes;
4621
4622         if (global_rsv->space_info != dest->space_info)
4623                 return -ENOSPC;
4624
4625         spin_lock(&global_rsv->lock);
4626         min_bytes = div_factor(global_rsv->size, min_factor);
4627         if (global_rsv->reserved < min_bytes + num_bytes) {
4628                 spin_unlock(&global_rsv->lock);
4629                 return -ENOSPC;
4630         }
4631         global_rsv->reserved -= num_bytes;
4632         if (global_rsv->reserved < global_rsv->size)
4633                 global_rsv->full = 0;
4634         spin_unlock(&global_rsv->lock);
4635
4636         block_rsv_add_bytes(dest, num_bytes, 1);
4637         return 0;
4638 }
4639
4640 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4641                                     struct btrfs_block_rsv *block_rsv,
4642                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4643 {
4644         struct btrfs_space_info *space_info = block_rsv->space_info;
4645
4646         spin_lock(&block_rsv->lock);
4647         if (num_bytes == (u64)-1)
4648                 num_bytes = block_rsv->size;
4649         block_rsv->size -= num_bytes;
4650         if (block_rsv->reserved >= block_rsv->size) {
4651                 num_bytes = block_rsv->reserved - block_rsv->size;
4652                 block_rsv->reserved = block_rsv->size;
4653                 block_rsv->full = 1;
4654         } else {
4655                 num_bytes = 0;
4656         }
4657         spin_unlock(&block_rsv->lock);
4658
4659         if (num_bytes > 0) {
4660                 if (dest) {
4661                         spin_lock(&dest->lock);
4662                         if (!dest->full) {
4663                                 u64 bytes_to_add;
4664
4665                                 bytes_to_add = dest->size - dest->reserved;
4666                                 bytes_to_add = min(num_bytes, bytes_to_add);
4667                                 dest->reserved += bytes_to_add;
4668                                 if (dest->reserved >= dest->size)
4669                                         dest->full = 1;
4670                                 num_bytes -= bytes_to_add;
4671                         }
4672                         spin_unlock(&dest->lock);
4673                 }
4674                 if (num_bytes) {
4675                         spin_lock(&space_info->lock);
4676                         space_info->bytes_may_use -= num_bytes;
4677                         trace_btrfs_space_reservation(fs_info, "space_info",
4678                                         space_info->flags, num_bytes, 0);
4679                         spin_unlock(&space_info->lock);
4680                 }
4681         }
4682 }
4683
4684 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4685                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4686 {
4687         int ret;
4688
4689         ret = block_rsv_use_bytes(src, num_bytes);
4690         if (ret)
4691                 return ret;
4692
4693         block_rsv_add_bytes(dst, num_bytes, 1);
4694         return 0;
4695 }
4696
4697 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4698 {
4699         memset(rsv, 0, sizeof(*rsv));
4700         spin_lock_init(&rsv->lock);
4701         rsv->type = type;
4702 }
4703
4704 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4705                                               unsigned short type)
4706 {
4707         struct btrfs_block_rsv *block_rsv;
4708         struct btrfs_fs_info *fs_info = root->fs_info;
4709
4710         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4711         if (!block_rsv)
4712                 return NULL;
4713
4714         btrfs_init_block_rsv(block_rsv, type);
4715         block_rsv->space_info = __find_space_info(fs_info,
4716                                                   BTRFS_BLOCK_GROUP_METADATA);
4717         return block_rsv;
4718 }
4719
4720 void btrfs_free_block_rsv(struct btrfs_root *root,
4721                           struct btrfs_block_rsv *rsv)
4722 {
4723         if (!rsv)
4724                 return;
4725         btrfs_block_rsv_release(root, rsv, (u64)-1);
4726         kfree(rsv);
4727 }
4728
4729 int btrfs_block_rsv_add(struct btrfs_root *root,
4730                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4731                         enum btrfs_reserve_flush_enum flush)
4732 {
4733         int ret;
4734
4735         if (num_bytes == 0)
4736                 return 0;
4737
4738         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4739         if (!ret) {
4740                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4741                 return 0;
4742         }
4743
4744         return ret;
4745 }
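
/*
 * Minimal usage sketch (hypothetical caller, error handling trimmed;
 * BTRFS_BLOCK_RSV_TEMP is the type used for short-lived reservations):
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret) {
 *		// ... consume the reservation ...
 *	}
 *	btrfs_free_block_rsv(root, rsv);	// releases what's left
 */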
4746
4747 int btrfs_block_rsv_check(struct btrfs_root *root,
4748                           struct btrfs_block_rsv *block_rsv, int min_factor)
4749 {
4750         u64 num_bytes = 0;
4751         int ret = -ENOSPC;
4752
4753         if (!block_rsv)
4754                 return 0;
4755
4756         spin_lock(&block_rsv->lock);
4757         num_bytes = div_factor(block_rsv->size, min_factor);
4758         if (block_rsv->reserved >= num_bytes)
4759                 ret = 0;
4760         spin_unlock(&block_rsv->lock);
4761
4762         return ret;
4763 }
4764
4765 int btrfs_block_rsv_refill(struct btrfs_root *root,
4766                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4767                            enum btrfs_reserve_flush_enum flush)
4768 {
4769         u64 num_bytes = 0;
4770         int ret = -ENOSPC;
4771
4772         if (!block_rsv)
4773                 return 0;
4774
4775         spin_lock(&block_rsv->lock);
4776         num_bytes = min_reserved;
4777         if (block_rsv->reserved >= num_bytes)
4778                 ret = 0;
4779         else
4780                 num_bytes -= block_rsv->reserved;
4781         spin_unlock(&block_rsv->lock);
4782
4783         if (!ret)
4784                 return 0;
4785
4786         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4787         if (!ret) {
4788                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4789                 return 0;
4790         }
4791
4792         return ret;
4793 }
4794
4795 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4796                             struct btrfs_block_rsv *dst_rsv,
4797                             u64 num_bytes)
4798 {
4799         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4800 }
4801
4802 void btrfs_block_rsv_release(struct btrfs_root *root,
4803                              struct btrfs_block_rsv *block_rsv,
4804                              u64 num_bytes)
4805 {
4806         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4807         if (global_rsv == block_rsv ||
4808             block_rsv->space_info != global_rsv->space_info)
4809                 global_rsv = NULL;
4810         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4811                                 num_bytes);
4812 }
4813
4814 /*
4815  * Helper to calculate the size of the global block reservation.
4816  * The desired value is the sum of the space used by the extent tree,
4817  * the checksum tree and the root tree.
4818  */
4819 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4820 {
4821         struct btrfs_space_info *sinfo;
4822         u64 num_bytes;
4823         u64 meta_used;
4824         u64 data_used;
4825         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4826
4827         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4828         spin_lock(&sinfo->lock);
4829         data_used = sinfo->bytes_used;
4830         spin_unlock(&sinfo->lock);
4831
4832         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4833         spin_lock(&sinfo->lock);
4834         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4835                 data_used = 0;
4836         meta_used = sinfo->bytes_used;
4837         spin_unlock(&sinfo->lock);
4838
4839         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4840                     csum_size * 2;
4841         num_bytes += div_u64(data_used + meta_used, 50);
4842
4843         if (num_bytes * 3 > meta_used)
4844                 num_bytes = div_u64(meta_used, 3);
4845
4846         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4847 }
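
/*
 * Worked example (assumed numbers): with 100GiB of data used, 1GiB of
 * metadata used, 4KiB blocks and 4-byte crc32c csums, the csum term is
 * (100GiB / 4KiB) * 4 * 2 = 200MiB and the 2% term adds
 * 101GiB / 50 ~= 2GiB.  Since three times that ~2.2GiB total exceeds
 * the 1GiB of metadata actually in use, the result is capped at
 * meta_used / 3 ~= 341MiB before being aligned up to a
 * nodesize << 10 boundary.
 */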
4848
4849 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4850 {
4851         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4852         struct btrfs_space_info *sinfo = block_rsv->space_info;
4853         u64 num_bytes;
4854
4855         num_bytes = calc_global_metadata_size(fs_info);
4856
4857         spin_lock(&sinfo->lock);
4858         spin_lock(&block_rsv->lock);
4859
4860         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4861
4862         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4863                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4864                     sinfo->bytes_may_use;
4865
4866         if (sinfo->total_bytes > num_bytes) {
4867                 num_bytes = sinfo->total_bytes - num_bytes;
4868                 block_rsv->reserved += num_bytes;
4869                 sinfo->bytes_may_use += num_bytes;
4870                 trace_btrfs_space_reservation(fs_info, "space_info",
4871                                       sinfo->flags, num_bytes, 1);
4872         }
4873
4874         if (block_rsv->reserved >= block_rsv->size) {
4875                 num_bytes = block_rsv->reserved - block_rsv->size;
4876                 sinfo->bytes_may_use -= num_bytes;
4877                 trace_btrfs_space_reservation(fs_info, "space_info",
4878                                       sinfo->flags, num_bytes, 0);
4879                 block_rsv->reserved = block_rsv->size;
4880                 block_rsv->full = 1;
4881         }
4882
4883         spin_unlock(&block_rsv->lock);
4884         spin_unlock(&sinfo->lock);
4885 }
4886
4887 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4888 {
4889         struct btrfs_space_info *space_info;
4890
4891         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4892         fs_info->chunk_block_rsv.space_info = space_info;
4893
4894         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4895         fs_info->global_block_rsv.space_info = space_info;
4896         fs_info->delalloc_block_rsv.space_info = space_info;
4897         fs_info->trans_block_rsv.space_info = space_info;
4898         fs_info->empty_block_rsv.space_info = space_info;
4899         fs_info->delayed_block_rsv.space_info = space_info;
4900
4901         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4902         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4903         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4904         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4905         if (fs_info->quota_root)
4906                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4907         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4908
4909         update_global_block_rsv(fs_info);
4910 }
4911
4912 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4913 {
4914         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4915                                 (u64)-1);
4916         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4917         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4918         WARN_ON(fs_info->trans_block_rsv.size > 0);
4919         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4920         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4921         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4922         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4923         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4924 }
4925
4926 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4927                                   struct btrfs_root *root)
4928 {
4929         if (!trans->block_rsv)
4930                 return;
4931
4932         if (!trans->bytes_reserved)
4933                 return;
4934
4935         trace_btrfs_space_reservation(root->fs_info, "transaction",
4936                                       trans->transid, trans->bytes_reserved, 0);
4937         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4938         trans->bytes_reserved = 0;
4939 }
4940
4941 /* Can only return 0 or -ENOSPC */
4942 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4943                                   struct inode *inode)
4944 {
4945         struct btrfs_root *root = BTRFS_I(inode)->root;
4946         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4947         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4948
4949         /*
4950          * We need to hold space in order to delete our orphan item once we've
4951          * added it, so this takes the reservation so we can release it later
4952          * when we are truly done with the orphan item.
4953          */
4954         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4955         trace_btrfs_space_reservation(root->fs_info, "orphan",
4956                                       btrfs_ino(inode), num_bytes, 1);
4957         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4958 }
4959
4960 void btrfs_orphan_release_metadata(struct inode *inode)
4961 {
4962         struct btrfs_root *root = BTRFS_I(inode)->root;
4963         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4964         trace_btrfs_space_reservation(root->fs_info, "orphan",
4965                                       btrfs_ino(inode), num_bytes, 0);
4966         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4967 }
4968
4969 /*
4970  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4971  * root: the root of the parent directory
4972  * rsv: block reservation
4973  * items: the number of items that we need to reserve space for
4974  * qgroup_reserved: used to return the reserved size in qgroup
4975  *
4976  * This function is used to reserve the space for snapshot/subvolume
4977  * creation and deletion.  Those operations differ from the common
4978  * file/directory operations: they change two fs/file trees and the
4979  * root tree, so the number of items that the qgroup reserves differs
4980  * from the free space reservation.  Hence we cannot use the space
4981  * reservation mechanism in start_transaction().
4982  */
4983 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4984                                      struct btrfs_block_rsv *rsv,
4985                                      int items,
4986                                      u64 *qgroup_reserved,
4987                                      bool use_global_rsv)
4988 {
4989         u64 num_bytes;
4990         int ret;
4991         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4992
4993         if (root->fs_info->quota_enabled) {
4994                 /* One for parent inode, two for dir entries */
4995                 num_bytes = 3 * root->nodesize;
4996                 ret = btrfs_qgroup_reserve(root, num_bytes);
4997                 if (ret)
4998                         return ret;
4999         } else {
5000                 num_bytes = 0;
5001         }
5002
5003         *qgroup_reserved = num_bytes;
5004
5005         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5006         rsv->space_info = __find_space_info(root->fs_info,
5007                                             BTRFS_BLOCK_GROUP_METADATA);
5008         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5009                                   BTRFS_RESERVE_FLUSH_ALL);
5010
5011         if (ret == -ENOSPC && use_global_rsv)
5012                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5013
5014         if (ret) {
5015                 if (*qgroup_reserved)
5016                         btrfs_qgroup_free(root, *qgroup_reserved);
5017         }
5018
5019         return ret;
5020 }
5021
5022 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5023                                       struct btrfs_block_rsv *rsv,
5024                                       u64 qgroup_reserved)
5025 {
5026         btrfs_block_rsv_release(root, rsv, (u64)-1);
5027         if (qgroup_reserved)
5028                 btrfs_qgroup_free(root, qgroup_reserved);
5029 }
5030
5031 /**
5032  * drop_outstanding_extent - drop an outstanding extent
5033  * @inode: the inode we're dropping the extent for
5034  * @num_bytes: the number of bytes we're releasing.
5035  *
5036  * This is called when we are freeing up an outstanding extent, either called
5037  * after an error or after an extent is written.  This will return the number of
5038  * reserved extents that need to be freed.  This must be called with
5039  * BTRFS_I(inode)->lock held.
5040  */
5041 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5042 {
5043         unsigned drop_inode_space = 0;
5044         unsigned dropped_extents = 0;
5045         unsigned num_extents = 0;
5046
5047         num_extents = (unsigned)div64_u64(num_bytes +
5048                                           BTRFS_MAX_EXTENT_SIZE - 1,
5049                                           BTRFS_MAX_EXTENT_SIZE);
5050         ASSERT(num_extents);
5051         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5052         BTRFS_I(inode)->outstanding_extents -= num_extents;
5053
5054         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5055             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5056                                &BTRFS_I(inode)->runtime_flags))
5057                 drop_inode_space = 1;
5058
5059         /*
5060          * If we have at least as many outstanding extents as we have
5061          * reserved then we need to leave the reserved extents count alone.
5062          */
5063         if (BTRFS_I(inode)->outstanding_extents >=
5064             BTRFS_I(inode)->reserved_extents)
5065                 return drop_inode_space;
5066
5067         dropped_extents = BTRFS_I(inode)->reserved_extents -
5068                 BTRFS_I(inode)->outstanding_extents;
5069         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5070         return dropped_extents + drop_inode_space;
5071 }
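
/*
 * Worked example (assuming BTRFS_MAX_EXTENT_SIZE is 128MiB): dropping
 * 256MiB of outstanding delalloc removes 2 outstanding extents.  If
 * that leaves outstanding_extents at 3 while reserved_extents is 5,
 * two reserved extents are dropped too and the caller gets back 2
 * (plus 1 more if the DELALLOC_META_RESERVED bit was cleared).
 */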
5072
5073 /**
5074  * calc_csum_metadata_size - return the amount of metadata space that must be
5075  *      reserved/freed for the given bytes.
5076  * @inode: the inode we're manipulating
5077  * @num_bytes: the number of bytes in question
5078  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5079  *
5080  * This adjusts the number of csum_bytes in the inode and then returns the
5081  * correct amount of metadata that must either be reserved or freed.  We
5082  * calculate how many checksums we can fit into one leaf and then divide the
5083  * number of bytes that will need to be checksummed by this value to figure out
5084  * how many checksums will be required.  If we are adding bytes then the number
5085  * may go up and we will return the number of additional bytes that must be
5086  * reserved.  If it is going down we will return the number of bytes that must
5087  * be freed.
5088  *
5089  * This must be called with BTRFS_I(inode)->lock held.
5090  */
5091 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5092                                    int reserve)
5093 {
5094         struct btrfs_root *root = BTRFS_I(inode)->root;
5095         u64 old_csums, num_csums;
5096
5097         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5098             BTRFS_I(inode)->csum_bytes == 0)
5099                 return 0;
5100
5101         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5102         if (reserve)
5103                 BTRFS_I(inode)->csum_bytes += num_bytes;
5104         else
5105                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5106         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5107
5108         /* No change, no need to reserve more */
5109         if (old_csums == num_csums)
5110                 return 0;
5111
5112         if (reserve)
5113                 return btrfs_calc_trans_metadata_size(root,
5114                                                       num_csums - old_csums);
5115
5116         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5117 }
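
/*
 * Worked example (assumed numbers): if reserving num_bytes moves the
 * inode's csum_bytes from 1MiB to 2MiB and that pushes
 * btrfs_csum_bytes_to_leaves() from 1 leaf to 2, the caller must
 * reserve btrfs_calc_trans_metadata_size(root, 1) more; if the leaf
 * count is unchanged, nothing extra is needed.
 */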
5118
5119 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5120 {
5121         struct btrfs_root *root = BTRFS_I(inode)->root;
5122         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5123         u64 to_reserve = 0;
5124         u64 csum_bytes;
5125         unsigned nr_extents = 0;
5126         int extra_reserve = 0;
5127         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5128         int ret = 0;
5129         bool delalloc_lock = true;
5130         u64 to_free = 0;
5131         unsigned dropped;
5132
5133         /* If we are a free space inode we need to not flush since we will be in
5134          * the middle of a transaction commit.  We also don't need the delalloc
5135          * mutex since we won't race with anybody.  We need this mostly to make
5136          * lockdep shut its filthy mouth.
5137          */
5138         if (btrfs_is_free_space_inode(inode)) {
5139                 flush = BTRFS_RESERVE_NO_FLUSH;
5140                 delalloc_lock = false;
5141         }
5142
5143         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5144             btrfs_transaction_in_commit(root->fs_info))
5145                 schedule_timeout(1);
5146
5147         if (delalloc_lock)
5148                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5149
5150         num_bytes = ALIGN(num_bytes, root->sectorsize);
5151
5152         spin_lock(&BTRFS_I(inode)->lock);
5153         nr_extents = (unsigned)div64_u64(num_bytes +
5154                                          BTRFS_MAX_EXTENT_SIZE - 1,
5155                                          BTRFS_MAX_EXTENT_SIZE);
5156         BTRFS_I(inode)->outstanding_extents += nr_extents;
5157         nr_extents = 0;
5158
5159         if (BTRFS_I(inode)->outstanding_extents >
5160             BTRFS_I(inode)->reserved_extents)
5161                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5162                         BTRFS_I(inode)->reserved_extents;
5163
5164         /*
5165          * Add an item to reserve for updating the inode when we complete the
5166          * delalloc io.
5167          */
5168         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5169                       &BTRFS_I(inode)->runtime_flags)) {
5170                 nr_extents++;
5171                 extra_reserve = 1;
5172         }
5173
5174         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5175         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5176         csum_bytes = BTRFS_I(inode)->csum_bytes;
5177         spin_unlock(&BTRFS_I(inode)->lock);
5178
5179         if (root->fs_info->quota_enabled) {
5180                 ret = btrfs_qgroup_reserve(root, num_bytes +
5181                                            nr_extents * root->nodesize);
5182                 if (ret)
5183                         goto out_fail;
5184         }
5185
5186         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5187         if (unlikely(ret)) {
5188                 if (root->fs_info->quota_enabled)
5189                         btrfs_qgroup_free(root, num_bytes +
5190                                                 nr_extents * root->nodesize);
5191                 goto out_fail;
5192         }
5193
5194         spin_lock(&BTRFS_I(inode)->lock);
5195         if (extra_reserve) {
5196                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5197                         &BTRFS_I(inode)->runtime_flags);
5198                 nr_extents--;
5199         }
5200         BTRFS_I(inode)->reserved_extents += nr_extents;
5201         spin_unlock(&BTRFS_I(inode)->lock);
5202
5203         if (delalloc_lock)
5204                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5205
5206         if (to_reserve)
5207                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5208                                               btrfs_ino(inode), to_reserve, 1);
5209         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5210
5211         return 0;
5212
5213 out_fail:
5214         spin_lock(&BTRFS_I(inode)->lock);
5215         dropped = drop_outstanding_extent(inode, num_bytes);
5216         /*
5217          * If the inode's csum_bytes is the same as the original
5218          * csum_bytes then we know we haven't raced with any free()ers
5219          * so we can just reduce our inode's csum bytes and carry on.
5220          */
5221         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5222                 calc_csum_metadata_size(inode, num_bytes, 0);
5223         } else {
5224                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5225                 u64 bytes;
5226
5227                 /*
5228                  * This is tricky, but first we need to figure out how much we
5229                  * freed by any free()ers that occurred during this
5230                  * reservation, so we reset ->csum_bytes to the csum_bytes
5231                  * before we dropped our lock, and then call the free for the
5232                  * number of bytes that were freed while we were trying our
5233                  * reservation.
5234                  */
5235                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5236                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5237                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5238
5239
5240                 /*
5241                  * Now we need to see how much we would have freed had we not
5242                  * been making this reservation and our ->csum_bytes were not
5243                  * artificially inflated.
5244                  */
5245                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5246                 bytes = csum_bytes - orig_csum_bytes;
5247                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5248
5249                 /*
5250                  * Now reset ->csum_bytes to what it should be.  If bytes is
5251                  * more than to_free then we would have freed more space had we
5252                  * not had an artificially high ->csum_bytes, so we need to free
5253                  * the remainder.  If bytes is the same or less then we don't
5254                  * need to do anything, the other free()ers did the correct
5255                  * thing.
5256                  */
5257                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5258                 if (bytes > to_free)
5259                         to_free = bytes - to_free;
5260                 else
5261                         to_free = 0;
5262         }
5263         spin_unlock(&BTRFS_I(inode)->lock);
5264         if (dropped)
5265                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5266
5267         if (to_free) {
5268                 btrfs_block_rsv_release(root, block_rsv, to_free);
5269                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5270                                               btrfs_ino(inode), to_free, 0);
5271         }
5272         if (delalloc_lock)
5273                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5274         return ret;
5275 }
5276
5277 /**
5278  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5279  * @inode: the inode to release the reservation for
5280  * @num_bytes: the number of bytes we're releasing
5281  *
5282  * This will release the metadata reservation for an inode.  This can be called
5283  * once we complete IO for a given set of bytes to release their metadata
5284  * reservations.
5285  */
5286 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5287 {
5288         struct btrfs_root *root = BTRFS_I(inode)->root;
5289         u64 to_free = 0;
5290         unsigned dropped;
5291
5292         num_bytes = ALIGN(num_bytes, root->sectorsize);
5293         spin_lock(&BTRFS_I(inode)->lock);
5294         dropped = drop_outstanding_extent(inode, num_bytes);
5295
5296         if (num_bytes)
5297                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5298         spin_unlock(&BTRFS_I(inode)->lock);
5299         if (dropped > 0)
5300                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5301
5302         if (btrfs_test_is_dummy_root(root))
5303                 return;
5304
5305         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5306                                       btrfs_ino(inode), to_free, 0);
5307         if (root->fs_info->quota_enabled) {
5308                 btrfs_qgroup_free(root, num_bytes +
5309                                         dropped * root->nodesize);
5310         }
5311
5312         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5313                                 to_free);
5314 }
5315
5316 /**
5317  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5318  * @inode: inode we're writing to
5319  * @num_bytes: the number of bytes we want to allocate
5320  *
5321  * This will do the following things
5322  *
5323  * o reserve space in the data space info for num_bytes
5324  * o reserve space in the metadata space info based on number of outstanding
5325  *   extents and how much csums will be needed
5326  * o add to the inode's ->delalloc_bytes
5327  * o add it to the fs_info's delalloc inodes list.
5328  *
5329  * This will return 0 for success and -ENOSPC if there is no space left.
5330  */
5331 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5332 {
5333         int ret;
5334
5335         ret = btrfs_check_data_free_space(inode, num_bytes);
5336         if (ret)
5337                 return ret;
5338
5339         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5340         if (ret) {
5341                 btrfs_free_reserved_data_space(inode, num_bytes);
5342                 return ret;
5343         }
5344
5345         return 0;
5346 }
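
/*
 * Usage sketch for a write path (hypothetical caller;
 * do_buffered_write() is an assumed helper, not a real function):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, count);
 *	if (ret)
 *		return ret;
 *	ret = do_buffered_write(inode, buf, count);
 *	if (ret < 0)
 *		btrfs_delalloc_release_space(inode, count);
 */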
5347
5348 /**
5349  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5350  * @inode: inode we're releasing space for
5351  * @num_bytes: the number of bytes we want to free up
5352  *
5353  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5354  * called in the case that we don't need the metadata AND data reservations
5355  * anymore, e.g. if there is an error or we insert an inline extent.
5356  *
5357  * This function will release the metadata space that was not used and will
5358  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5359  * list if there are no delalloc bytes left.
5360  */
5361 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5362 {
5363         btrfs_delalloc_release_metadata(inode, num_bytes);
5364         btrfs_free_reserved_data_space(inode, num_bytes);
5365 }
5366
5367 static int update_block_group(struct btrfs_trans_handle *trans,
5368                               struct btrfs_root *root, u64 bytenr,
5369                               u64 num_bytes, int alloc)
5370 {
5371         struct btrfs_block_group_cache *cache = NULL;
5372         struct btrfs_fs_info *info = root->fs_info;
5373         u64 total = num_bytes;
5374         u64 old_val;
5375         u64 byte_in_group;
5376         int factor;
5377
5378         /* block accounting for super block */
5379         spin_lock(&info->delalloc_root_lock);
5380         old_val = btrfs_super_bytes_used(info->super_copy);
5381         if (alloc)
5382                 old_val += num_bytes;
5383         else
5384                 old_val -= num_bytes;
5385         btrfs_set_super_bytes_used(info->super_copy, old_val);
5386         spin_unlock(&info->delalloc_root_lock);
5387
5388         while (total) {
5389                 cache = btrfs_lookup_block_group(info, bytenr);
5390                 if (!cache)
5391                         return -ENOENT;
5392                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5393                                     BTRFS_BLOCK_GROUP_RAID1 |
5394                                     BTRFS_BLOCK_GROUP_RAID10))
5395                         factor = 2;
5396                 else
5397                         factor = 1;
5398                 /*
5399                  * If this block group has free space cache written out, we
5400                  * need to make sure to load it if we are removing space.  This
5401                  * is because we need the unpinning stage to actually add the
5402                  * space back to the block group, otherwise we will leak space.
5403                  */
5404                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5405                         cache_block_group(cache, 1);
5406
5407                 spin_lock(&trans->transaction->dirty_bgs_lock);
5408                 if (list_empty(&cache->dirty_list)) {
5409                         list_add_tail(&cache->dirty_list,
5410                                       &trans->transaction->dirty_bgs);
5411                         btrfs_get_block_group(cache);
5412                 }
5413                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5414
5415                 byte_in_group = bytenr - cache->key.objectid;
5416                 WARN_ON(byte_in_group > cache->key.offset);
5417
5418                 spin_lock(&cache->space_info->lock);
5419                 spin_lock(&cache->lock);
5420
5421                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5422                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5423                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5424
5425                 old_val = btrfs_block_group_used(&cache->item);
5426                 num_bytes = min(total, cache->key.offset - byte_in_group);
5427                 if (alloc) {
5428                         old_val += num_bytes;
5429                         btrfs_set_block_group_used(&cache->item, old_val);
5430                         cache->reserved -= num_bytes;
5431                         cache->space_info->bytes_reserved -= num_bytes;
5432                         cache->space_info->bytes_used += num_bytes;
5433                         cache->space_info->disk_used += num_bytes * factor;
5434                         spin_unlock(&cache->lock);
5435                         spin_unlock(&cache->space_info->lock);
5436                 } else {
5437                         old_val -= num_bytes;
5438                         btrfs_set_block_group_used(&cache->item, old_val);
5439                         cache->pinned += num_bytes;
5440                         cache->space_info->bytes_pinned += num_bytes;
5441                         cache->space_info->bytes_used -= num_bytes;
5442                         cache->space_info->disk_used -= num_bytes * factor;
5443                         spin_unlock(&cache->lock);
5444                         spin_unlock(&cache->space_info->lock);
5445
5446                         set_extent_dirty(info->pinned_extents,
5447                                          bytenr, bytenr + num_bytes - 1,
5448                                          GFP_NOFS | __GFP_NOFAIL);
5449                         /*
5450                          * We no longer have used bytes in this block
5451                          * group; queue it for deletion.
5452                          */
5453                         if (old_val == 0) {
5454                                 spin_lock(&info->unused_bgs_lock);
5455                                 if (list_empty(&cache->bg_list)) {
5456                                         btrfs_get_block_group(cache);
5457                                         list_add_tail(&cache->bg_list,
5458                                                       &info->unused_bgs);
5459                                 }
5460                                 spin_unlock(&info->unused_bgs_lock);
5461                         }
5462                 }
5463                 btrfs_put_block_group(cache);
5464                 total -= num_bytes;
5465                 bytenr += num_bytes;
5466         }
5467         return 0;
5468 }
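
/*
 * Worked example for the 'factor' accounting above (added; not in the
 * original source): allocating 1MiB from a RAID1 (or DUP/RAID10) block
 * group moves the space_info counters by
 *
 *	bytes_used += 1MiB
 *	disk_used  += 1MiB * factor == 2MiB	(both copies hit the disk)
 *
 * while SINGLE/RAID0 groups use factor == 1, so bytes_used and disk_used
 * move in lockstep.
 */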
5469
5470 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5471 {
5472         struct btrfs_block_group_cache *cache;
5473         u64 bytenr;
5474
5475         spin_lock(&root->fs_info->block_group_cache_lock);
5476         bytenr = root->fs_info->first_logical_byte;
5477         spin_unlock(&root->fs_info->block_group_cache_lock);
5478
5479         if (bytenr < (u64)-1)
5480                 return bytenr;
5481
5482         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5483         if (!cache)
5484                 return 0;
5485
5486         bytenr = cache->key.objectid;
5487         btrfs_put_block_group(cache);
5488
5489         return bytenr;
5490 }
5491
5492 static int pin_down_extent(struct btrfs_root *root,
5493                            struct btrfs_block_group_cache *cache,
5494                            u64 bytenr, u64 num_bytes, int reserved)
5495 {
5496         spin_lock(&cache->space_info->lock);
5497         spin_lock(&cache->lock);
5498         cache->pinned += num_bytes;
5499         cache->space_info->bytes_pinned += num_bytes;
5500         if (reserved) {
5501                 cache->reserved -= num_bytes;
5502                 cache->space_info->bytes_reserved -= num_bytes;
5503         }
5504         spin_unlock(&cache->lock);
5505         spin_unlock(&cache->space_info->lock);
5506
5507         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5508                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5509         if (reserved)
5510                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5511         return 0;
5512 }
5513
5514 /*
5515  * this function must be called within a transaction
5516  */
5517 int btrfs_pin_extent(struct btrfs_root *root,
5518                      u64 bytenr, u64 num_bytes, int reserved)
5519 {
5520         struct btrfs_block_group_cache *cache;
5521
5522         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5523         BUG_ON(!cache); /* Logic error */
5524
5525         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5526
5527         btrfs_put_block_group(cache);
5528         return 0;
5529 }
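
/*
 * Usage sketch (added; not in the original source): pinning keeps freed
 * space away from the allocator until the transaction commits, e.g.
 *
 *	btrfs_pin_extent(root, bytenr, num_bytes, 1);
 *
 * The bytes only return to the free space cache when
 * btrfs_finish_extent_commit() unpins them after the commit.
 */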
5530
5531 /*
5532  * this function must be called within a transaction
5533  */
5534 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5535                                     u64 bytenr, u64 num_bytes)
5536 {
5537         struct btrfs_block_group_cache *cache;
5538         int ret;
5539
5540         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5541         if (!cache)
5542                 return -EINVAL;
5543
5544         /*
5545          * pull in the free space cache (if any) so that our pin
5546           * removes the free space from the cache.  We have load_only set
5547           * to one because the slow code that reads in the free extents does
5548           * check the pinned extents.
5549          */
5550         cache_block_group(cache, 1);
5551
5552         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5553
5554         /* remove us from the free space cache (if we're there at all) */
5555         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5556         btrfs_put_block_group(cache);
5557         return ret;
5558 }
5559
5560 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5561 {
5562         int ret;
5563         struct btrfs_block_group_cache *block_group;
5564         struct btrfs_caching_control *caching_ctl;
5565
5566         block_group = btrfs_lookup_block_group(root->fs_info, start);
5567         if (!block_group)
5568                 return -EINVAL;
5569
5570         cache_block_group(block_group, 0);
5571         caching_ctl = get_caching_control(block_group);
5572
5573         if (!caching_ctl) {
5574                 /* Logic error */
5575                 BUG_ON(!block_group_cache_done(block_group));
5576                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5577         } else {
5578                 mutex_lock(&caching_ctl->mutex);
5579
5580                 if (start >= caching_ctl->progress) {
5581                         ret = add_excluded_extent(root, start, num_bytes);
5582                 } else if (start + num_bytes <= caching_ctl->progress) {
5583                         ret = btrfs_remove_free_space(block_group,
5584                                                       start, num_bytes);
5585                 } else {
5586                         num_bytes = caching_ctl->progress - start;
5587                         ret = btrfs_remove_free_space(block_group,
5588                                                       start, num_bytes);
5589                         if (ret)
5590                                 goto out_lock;
5591
5592                         num_bytes = (start + num_bytes) -
5593                                 caching_ctl->progress;
5594                         start = caching_ctl->progress;
5595                         ret = add_excluded_extent(root, start, num_bytes);
5596                 }
5597 out_lock:
5598                 mutex_unlock(&caching_ctl->mutex);
5599                 put_caching_control(caching_ctl);
5600         }
5601         btrfs_put_block_group(block_group);
5602         return ret;
5603 }
5604
5605 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5606                                  struct extent_buffer *eb)
5607 {
5608         struct btrfs_file_extent_item *item;
5609         struct btrfs_key key;
5610         int found_type;
5611         int i;
5612
5613         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5614                 return 0;
5615
5616         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5617                 btrfs_item_key_to_cpu(eb, &key, i);
5618                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5619                         continue;
5620                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5621                 found_type = btrfs_file_extent_type(eb, item);
5622                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5623                         continue;
5624                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5625                         continue;
5626                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5627                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5628                 __exclude_logged_extent(log, key.objectid, key.offset);
5629         }
5630
5631         return 0;
5632 }
5633
5634 /**
5635  * btrfs_update_reserved_bytes - update the block_group and space info counters
5636  * @cache:      The cache we are manipulating
5637  * @num_bytes:  The number of bytes in question
5638  * @reserve:    One of the reservation enums
5639  * @delalloc:   The blocks are allocated for the delalloc write
5640  *
5641  * This is called by the allocator when it reserves space, or by somebody who is
5642  * freeing space that was never actually used on disk.  For example if you
5643  * reserve some space for a new leaf in transaction A and before transaction A
5644  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
5645  * in order to clear the reservation.
5646  *
5647  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5648  * ENOSPC accounting.  For data we handle the reservation through clearing the
5649  * delalloc bits in the io_tree.  We have to do this since we could end up
5650  * allocating less disk space for the amount of data we have reserved in the
5651  * case of compression.
5652  *
5653  * If this is a reservation and the block group has become read only we cannot
5654  * make the reservation and we return -EAGAIN; otherwise this function always
5655  * succeeds.
5656  */
5657 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5658                                        u64 num_bytes, int reserve, int delalloc)
5659 {
5660         struct btrfs_space_info *space_info = cache->space_info;
5661         int ret = 0;
5662
5663         spin_lock(&space_info->lock);
5664         spin_lock(&cache->lock);
5665         if (reserve != RESERVE_FREE) {
5666                 if (cache->ro) {
5667                         ret = -EAGAIN;
5668                 } else {
5669                         cache->reserved += num_bytes;
5670                         space_info->bytes_reserved += num_bytes;
5671                         if (reserve == RESERVE_ALLOC) {
5672                                 trace_btrfs_space_reservation(cache->fs_info,
5673                                                 "space_info", space_info->flags,
5674                                                 num_bytes, 0);
5675                                 space_info->bytes_may_use -= num_bytes;
5676                         }
5677
5678                         if (delalloc)
5679                                 cache->delalloc_bytes += num_bytes;
5680                 }
5681         } else {
5682                 if (cache->ro)
5683                         space_info->bytes_readonly += num_bytes;
5684                 cache->reserved -= num_bytes;
5685                 space_info->bytes_reserved -= num_bytes;
5686
5687                 if (delalloc)
5688                         cache->delalloc_bytes -= num_bytes;
5689         }
5690         spin_unlock(&cache->lock);
5691         spin_unlock(&space_info->lock);
5692         return ret;
5693 }
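
/*
 * Counter flow for the helper above (added; not in the original source),
 * assuming a writable block group and n == num_bytes:
 *
 *	RESERVE_ALLOC:		  reserved += n, bytes_reserved += n,
 *				  bytes_may_use -= n	(ENOSPC accounting)
 *	RESERVE_ALLOC_NO_ACCOUNT: as above, but bytes_may_use is untouched
 *	RESERVE_FREE:		  reserved -= n, bytes_reserved -= n
 *
 * so a reservation that is later abandoned nets out to zero.
 */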
5694
5695 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5696                                 struct btrfs_root *root)
5697 {
5698         struct btrfs_fs_info *fs_info = root->fs_info;
5699         struct btrfs_caching_control *next;
5700         struct btrfs_caching_control *caching_ctl;
5701         struct btrfs_block_group_cache *cache;
5702
5703         down_write(&fs_info->commit_root_sem);
5704
5705         list_for_each_entry_safe(caching_ctl, next,
5706                                  &fs_info->caching_block_groups, list) {
5707                 cache = caching_ctl->block_group;
5708                 if (block_group_cache_done(cache)) {
5709                         cache->last_byte_to_unpin = (u64)-1;
5710                         list_del_init(&caching_ctl->list);
5711                         put_caching_control(caching_ctl);
5712                 } else {
5713                         cache->last_byte_to_unpin = caching_ctl->progress;
5714                 }
5715         }
5716
5717         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5718                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5719         else
5720                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5721
5722         up_write(&fs_info->commit_root_sem);
5723
5724         update_global_block_rsv(fs_info);
5725 }
5726
5727 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
5728                               const bool return_free_space)
5729 {
5730         struct btrfs_fs_info *fs_info = root->fs_info;
5731         struct btrfs_block_group_cache *cache = NULL;
5732         struct btrfs_space_info *space_info;
5733         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5734         u64 len;
5735         bool readonly;
5736
5737         while (start <= end) {
5738                 readonly = false;
5739                 if (!cache ||
5740                     start >= cache->key.objectid + cache->key.offset) {
5741                         if (cache)
5742                                 btrfs_put_block_group(cache);
5743                         cache = btrfs_lookup_block_group(fs_info, start);
5744                         BUG_ON(!cache); /* Logic error */
5745                 }
5746
5747                 len = cache->key.objectid + cache->key.offset - start;
5748                 len = min(len, end + 1 - start);
5749
5750                 if (start < cache->last_byte_to_unpin) {
5751                         len = min(len, cache->last_byte_to_unpin - start);
5752                         if (return_free_space)
5753                                 btrfs_add_free_space(cache, start, len);
5754                 }
5755
5756                 start += len;
5757                 space_info = cache->space_info;
5758
5759                 spin_lock(&space_info->lock);
5760                 spin_lock(&cache->lock);
5761                 cache->pinned -= len;
5762                 space_info->bytes_pinned -= len;
5763                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5764                 if (cache->ro) {
5765                         space_info->bytes_readonly += len;
5766                         readonly = true;
5767                 }
5768                 spin_unlock(&cache->lock);
5769                 if (!readonly && global_rsv->space_info == space_info) {
5770                         spin_lock(&global_rsv->lock);
5771                         if (!global_rsv->full) {
5772                                 len = min(len, global_rsv->size -
5773                                           global_rsv->reserved);
5774                                 global_rsv->reserved += len;
5775                                 space_info->bytes_may_use += len;
5776                                 if (global_rsv->reserved >= global_rsv->size)
5777                                         global_rsv->full = 1;
5778                         }
5779                         spin_unlock(&global_rsv->lock);
5780                 }
5781                 spin_unlock(&space_info->lock);
5782         }
5783
5784         if (cache)
5785                 btrfs_put_block_group(cache);
5786         return 0;
5787 }
5788
5789 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5790                                struct btrfs_root *root)
5791 {
5792         struct btrfs_fs_info *fs_info = root->fs_info;
5793         struct extent_io_tree *unpin;
5794         u64 start;
5795         u64 end;
5796         int ret;
5797
5798         if (trans->aborted)
5799                 return 0;
5800
5801         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5802                 unpin = &fs_info->freed_extents[1];
5803         else
5804                 unpin = &fs_info->freed_extents[0];
5805
5806         while (1) {
5807                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
5808                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5809                                             EXTENT_DIRTY, NULL);
5810                 if (ret) {
5811                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5812                         break;
5813                 }
5814
5815                 if (btrfs_test_opt(root, DISCARD))
5816                         ret = btrfs_discard_extent(root, start,
5817                                                    end + 1 - start, NULL);
5818
5819                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5820                 unpin_extent_range(root, start, end, true);
5821                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5822                 cond_resched();
5823         }
5824
5825         return 0;
5826 }
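
/*
 * Note (added; not in the original source): pinned extents are double
 * buffered.  btrfs_prepare_extent_commit() flips fs_info->pinned_extents
 * between freed_extents[0] and freed_extents[1], and the function above
 * walks the tree that is *not* currently collecting new pins, so
 * unpinning never races with new frees on the same io_tree.
 */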
5827
5828 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5829                              u64 owner, u64 root_objectid)
5830 {
5831         struct btrfs_space_info *space_info;
5832         u64 flags;
5833
5834         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5835                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5836                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5837                 else
5838                         flags = BTRFS_BLOCK_GROUP_METADATA;
5839         } else {
5840                 flags = BTRFS_BLOCK_GROUP_DATA;
5841         }
5842
5843         space_info = __find_space_info(fs_info, flags);
5844         BUG_ON(!space_info); /* Logic bug */
5845         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5846 }
5847
5848
5849 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5850                                 struct btrfs_root *root,
5851                                 u64 bytenr, u64 num_bytes, u64 parent,
5852                                 u64 root_objectid, u64 owner_objectid,
5853                                 u64 owner_offset, int refs_to_drop,
5854                                 struct btrfs_delayed_extent_op *extent_op,
5855                                 int no_quota)
5856 {
5857         struct btrfs_key key;
5858         struct btrfs_path *path;
5859         struct btrfs_fs_info *info = root->fs_info;
5860         struct btrfs_root *extent_root = info->extent_root;
5861         struct extent_buffer *leaf;
5862         struct btrfs_extent_item *ei;
5863         struct btrfs_extent_inline_ref *iref;
5864         int ret;
5865         int is_data;
5866         int extent_slot = 0;
5867         int found_extent = 0;
5868         int num_to_del = 1;
5869         u32 item_size;
5870         u64 refs;
5871         int last_ref = 0;
5872         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5873         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5874                                                  SKINNY_METADATA);
5875
5876         if (!info->quota_enabled || !is_fstree(root_objectid))
5877                 no_quota = 1;
5878
5879         path = btrfs_alloc_path();
5880         if (!path)
5881                 return -ENOMEM;
5882
5883         path->reada = 1;
5884         path->leave_spinning = 1;
5885
5886         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5887         BUG_ON(!is_data && refs_to_drop != 1);
5888
5889         if (is_data)
5890                 skinny_metadata = 0;
5891
5892         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5893                                     bytenr, num_bytes, parent,
5894                                     root_objectid, owner_objectid,
5895                                     owner_offset);
5896         if (ret == 0) {
5897                 extent_slot = path->slots[0];
5898                 while (extent_slot >= 0) {
5899                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5900                                               extent_slot);
5901                         if (key.objectid != bytenr)
5902                                 break;
5903                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5904                             key.offset == num_bytes) {
5905                                 found_extent = 1;
5906                                 break;
5907                         }
5908                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5909                             key.offset == owner_objectid) {
5910                                 found_extent = 1;
5911                                 break;
5912                         }
5913                         if (path->slots[0] - extent_slot > 5)
5914                                 break;
5915                         extent_slot--;
5916                 }
5917 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5918                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5919                 if (found_extent && item_size < sizeof(*ei))
5920                         found_extent = 0;
5921 #endif
5922                 if (!found_extent) {
5923                         BUG_ON(iref);
5924                         ret = remove_extent_backref(trans, extent_root, path,
5925                                                     NULL, refs_to_drop,
5926                                                     is_data, &last_ref);
5927                         if (ret) {
5928                                 btrfs_abort_transaction(trans, extent_root, ret);
5929                                 goto out;
5930                         }
5931                         btrfs_release_path(path);
5932                         path->leave_spinning = 1;
5933
5934                         key.objectid = bytenr;
5935                         key.type = BTRFS_EXTENT_ITEM_KEY;
5936                         key.offset = num_bytes;
5937
5938                         if (!is_data && skinny_metadata) {
5939                                 key.type = BTRFS_METADATA_ITEM_KEY;
5940                                 key.offset = owner_objectid;
5941                         }
5942
5943                         ret = btrfs_search_slot(trans, extent_root,
5944                                                 &key, path, -1, 1);
5945                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5946                                 /*
5947                                  * Couldn't find our skinny metadata item,
5948                                  * see if we have ye olde extent item.
5949                                  */
5950                                 path->slots[0]--;
5951                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5952                                                       path->slots[0]);
5953                                 if (key.objectid == bytenr &&
5954                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5955                                     key.offset == num_bytes)
5956                                         ret = 0;
5957                         }
5958
5959                         if (ret > 0 && skinny_metadata) {
5960                                 skinny_metadata = false;
5961                                 key.objectid = bytenr;
5962                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5963                                 key.offset = num_bytes;
5964                                 btrfs_release_path(path);
5965                                 ret = btrfs_search_slot(trans, extent_root,
5966                                                         &key, path, -1, 1);
5967                         }
5968
5969                         if (ret) {
5970                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5971                                         ret, bytenr);
5972                                 if (ret > 0)
5973                                         btrfs_print_leaf(extent_root,
5974                                                          path->nodes[0]);
5975                         }
5976                         if (ret < 0) {
5977                                 btrfs_abort_transaction(trans, extent_root, ret);
5978                                 goto out;
5979                         }
5980                         extent_slot = path->slots[0];
5981                 }
5982         } else if (WARN_ON(ret == -ENOENT)) {
5983                 btrfs_print_leaf(extent_root, path->nodes[0]);
5984                 btrfs_err(info,
5985                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5986                         bytenr, parent, root_objectid, owner_objectid,
5987                         owner_offset);
5988                 btrfs_abort_transaction(trans, extent_root, ret);
5989                 goto out;
5990         } else {
5991                 btrfs_abort_transaction(trans, extent_root, ret);
5992                 goto out;
5993         }
5994
5995         leaf = path->nodes[0];
5996         item_size = btrfs_item_size_nr(leaf, extent_slot);
5997 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5998         if (item_size < sizeof(*ei)) {
5999                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6000                 ret = convert_extent_item_v0(trans, extent_root, path,
6001                                              owner_objectid, 0);
6002                 if (ret < 0) {
6003                         btrfs_abort_transaction(trans, extent_root, ret);
6004                         goto out;
6005                 }
6006
6007                 btrfs_release_path(path);
6008                 path->leave_spinning = 1;
6009
6010                 key.objectid = bytenr;
6011                 key.type = BTRFS_EXTENT_ITEM_KEY;
6012                 key.offset = num_bytes;
6013
6014                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6015                                         -1, 1);
6016                 if (ret) {
6017                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6018                                 ret, bytenr);
6019                         btrfs_print_leaf(extent_root, path->nodes[0]);
6020                 }
6021                 if (ret < 0) {
6022                         btrfs_abort_transaction(trans, extent_root, ret);
6023                         goto out;
6024                 }
6025
6026                 extent_slot = path->slots[0];
6027                 leaf = path->nodes[0];
6028                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6029         }
6030 #endif
6031         BUG_ON(item_size < sizeof(*ei));
6032         ei = btrfs_item_ptr(leaf, extent_slot,
6033                             struct btrfs_extent_item);
6034         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6035             key.type == BTRFS_EXTENT_ITEM_KEY) {
6036                 struct btrfs_tree_block_info *bi;
6037                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6038                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6039                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6040         }
6041
6042         refs = btrfs_extent_refs(leaf, ei);
6043         if (refs < refs_to_drop) {
6044                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6045                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6046                 ret = -EINVAL;
6047                 btrfs_abort_transaction(trans, extent_root, ret);
6048                 goto out;
6049         }
6050         refs -= refs_to_drop;
6051
6052         if (refs > 0) {
6053                 type = BTRFS_QGROUP_OPER_SUB_SHARED;
6054                 if (extent_op)
6055                         __run_delayed_extent_op(extent_op, leaf, ei);
6056                 /*
6057                  * In the case of inline back ref, reference count will
6058                  * be updated by remove_extent_backref
6059                  */
6060                 if (iref) {
6061                         BUG_ON(!found_extent);
6062                 } else {
6063                         btrfs_set_extent_refs(leaf, ei, refs);
6064                         btrfs_mark_buffer_dirty(leaf);
6065                 }
6066                 if (found_extent) {
6067                         ret = remove_extent_backref(trans, extent_root, path,
6068                                                     iref, refs_to_drop,
6069                                                     is_data, &last_ref);
6070                         if (ret) {
6071                                 btrfs_abort_transaction(trans, extent_root, ret);
6072                                 goto out;
6073                         }
6074                 }
6075                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6076                                  root_objectid);
6077         } else {
6078                 if (found_extent) {
6079                         BUG_ON(is_data && refs_to_drop !=
6080                                extent_data_ref_count(root, path, iref));
6081                         if (iref) {
6082                                 BUG_ON(path->slots[0] != extent_slot);
6083                         } else {
6084                                 BUG_ON(path->slots[0] != extent_slot + 1);
6085                                 path->slots[0] = extent_slot;
6086                                 num_to_del = 2;
6087                         }
6088                 }
6089
6090                 last_ref = 1;
6091                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6092                                       num_to_del);
6093                 if (ret) {
6094                         btrfs_abort_transaction(trans, extent_root, ret);
6095                         goto out;
6096                 }
6097                 btrfs_release_path(path);
6098
6099                 if (is_data) {
6100                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6101                         if (ret) {
6102                                 btrfs_abort_transaction(trans, extent_root, ret);
6103                                 goto out;
6104                         }
6105                 }
6106
6107                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6108                 if (ret) {
6109                         btrfs_abort_transaction(trans, extent_root, ret);
6110                         goto out;
6111                 }
6112         }
6113         btrfs_release_path(path);
6114
6115         /* Deal with the quota accounting */
6116         if (!ret && last_ref && !no_quota) {
6117                 int mod_seq = 0;
6118
6119                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6120                     type == BTRFS_QGROUP_OPER_SUB_SHARED)
6121                         mod_seq = 1;
6122
6123                 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6124                                               bytenr, num_bytes, type,
6125                                               mod_seq);
6126         }
6127 out:
6128         btrfs_free_path(path);
6129         return ret;
6130 }
6131
6132 /*
6133  * when we free a block, it is possible (and likely) that we free the last
6134  * delayed ref for that extent as well.  This searches the delayed ref tree for
6135  * a given extent, and if there are no other delayed refs to be processed, it
6136  * removes it from the tree.
6137  */
6138 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6139                                       struct btrfs_root *root, u64 bytenr)
6140 {
6141         struct btrfs_delayed_ref_head *head;
6142         struct btrfs_delayed_ref_root *delayed_refs;
6143         int ret = 0;
6144
6145         delayed_refs = &trans->transaction->delayed_refs;
6146         spin_lock(&delayed_refs->lock);
6147         head = btrfs_find_delayed_ref_head(trans, bytenr);
6148         if (!head)
6149                 goto out_delayed_unlock;
6150
6151         spin_lock(&head->lock);
6152         if (rb_first(&head->ref_root))
6153                 goto out;
6154
6155         if (head->extent_op) {
6156                 if (!head->must_insert_reserved)
6157                         goto out;
6158                 btrfs_free_delayed_extent_op(head->extent_op);
6159                 head->extent_op = NULL;
6160         }
6161
6162         /*
6163          * waiting for the lock here would deadlock.  If someone else has it
6164          * locked they are already in the process of dropping it anyway
6165          * locked, they are already in the process of dropping it anyway.
6166         if (!mutex_trylock(&head->mutex))
6167                 goto out;
6168
6169         /*
6170          * at this point we have a head with no other entries.  Go
6171          * ahead and process it.
6172          */
6173         head->node.in_tree = 0;
6174         rb_erase(&head->href_node, &delayed_refs->href_root);
6175
6176         atomic_dec(&delayed_refs->num_entries);
6177
6178         /*
6179          * we don't take a ref on the node because we're removing it from the
6180          * tree, so we just steal the ref the tree was holding.
6181          */
6182         delayed_refs->num_heads--;
6183         if (head->processing == 0)
6184                 delayed_refs->num_heads_ready--;
6185         head->processing = 0;
6186         spin_unlock(&head->lock);
6187         spin_unlock(&delayed_refs->lock);
6188
6189         BUG_ON(head->extent_op);
6190         if (head->must_insert_reserved)
6191                 ret = 1;
6192
6193         mutex_unlock(&head->mutex);
6194         btrfs_put_delayed_ref(&head->node);
6195         return ret;
6196 out:
6197         spin_unlock(&head->lock);
6198
6199 out_delayed_unlock:
6200         spin_unlock(&delayed_refs->lock);
6201         return 0;
6202 }
6203
6204 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6205                            struct btrfs_root *root,
6206                            struct extent_buffer *buf,
6207                            u64 parent, int last_ref)
6208 {
6209         int pin = 1;
6210         int ret;
6211
6212         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6213                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6214                                         buf->start, buf->len,
6215                                         parent, root->root_key.objectid,
6216                                         btrfs_header_level(buf),
6217                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6218                 BUG_ON(ret); /* -ENOMEM */
6219         }
6220
6221         if (!last_ref)
6222                 return;
6223
6224         if (btrfs_header_generation(buf) == trans->transid) {
6225                 struct btrfs_block_group_cache *cache;
6226
6227                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6228                         ret = check_ref_cleanup(trans, root, buf->start);
6229                         if (!ret)
6230                                 goto out;
6231                 }
6232
6233                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6234
6235                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6236                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6237                         btrfs_put_block_group(cache);
6238                         goto out;
6239                 }
6240
6241                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6242
6243                 btrfs_add_free_space(cache, buf->start, buf->len);
6244                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6245                 btrfs_put_block_group(cache);
6246                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6247                 pin = 0;
6248         }
6249 out:
6250         if (pin)
6251                 add_pinned_bytes(root->fs_info, buf->len,
6252                                  btrfs_header_level(buf),
6253                                  root->root_key.objectid);
6254
6255         /*
6256          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6257          * anymore.
6258          */
6259         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6260 }
6261
6262 /* Can return -ENOMEM */
6263 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6264                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6265                       u64 owner, u64 offset, int no_quota)
6266 {
6267         int ret;
6268         struct btrfs_fs_info *fs_info = root->fs_info;
6269
6270         if (btrfs_test_is_dummy_root(root))
6271                 return 0;
6272
6273         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6274
6275         /*
6276          * tree log blocks never actually go into the extent allocation
6277          * tree; we just update pinning info and exit early.
6278          */
6279         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6280                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6281                 /* unlocks the pinned mutex */
6282                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6283                 ret = 0;
6284         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6285                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6286                                         num_bytes,
6287                                         parent, root_objectid, (int)owner,
6288                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6289         } else {
6290                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6291                                                 num_bytes,
6292                                                 parent, root_objectid, owner,
6293                                                 offset, BTRFS_DROP_DELAYED_REF,
6294                                                 NULL, no_quota);
6295         }
6296         return ret;
6297 }
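
/*
 * Illustrative sketch (added; not in the original source): dropping a
 * file extent's reference through the function above queues a delayed
 * data ref instead of editing the extent tree directly, e.g.
 *
 *	ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 *				0, root->root_key.objectid,
 *				btrfs_ino(inode), file_offset, 0);
 *
 * (parent == 0 for a non-shared ref; file_offset is hypothetical.)
 * Only tree log blocks bypass this and are pinned immediately.
 */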
6298
6299 /*
6300  * when we wait for progress in the block group caching, it's because
6301  * our allocation attempt failed at least once.  So, we must sleep
6302  * and let some progress happen before we try again.
6303  *
6304  * This function will sleep at least once waiting for new free space to
6305  * show up, and then it will check the block group free space numbers
6306  * for our min num_bytes.  Another option is to have it go ahead
6307  * and look in the rbtree for a free extent of a given size, but this
6308  * is a good start.
6309  *
6310  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6311  * any of the information in this block group.
6312  */
6313 static noinline void
6314 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6315                                 u64 num_bytes)
6316 {
6317         struct btrfs_caching_control *caching_ctl;
6318
6319         caching_ctl = get_caching_control(cache);
6320         if (!caching_ctl)
6321                 return;
6322
6323         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6324                    (cache->free_space_ctl->free_space >= num_bytes));
6325
6326         put_caching_control(caching_ctl);
6327 }
6328
6329 static noinline int
6330 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6331 {
6332         struct btrfs_caching_control *caching_ctl;
6333         int ret = 0;
6334
6335         caching_ctl = get_caching_control(cache);
6336         if (!caching_ctl)
6337                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6338
6339         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6340         if (cache->cached == BTRFS_CACHE_ERROR)
6341                 ret = -EIO;
6342         put_caching_control(caching_ctl);
6343         return ret;
6344 }
6345
6346 int __get_raid_index(u64 flags)
6347 {
6348         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6349                 return BTRFS_RAID_RAID10;
6350         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6351                 return BTRFS_RAID_RAID1;
6352         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6353                 return BTRFS_RAID_DUP;
6354         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6355                 return BTRFS_RAID_RAID0;
6356         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6357                 return BTRFS_RAID_RAID5;
6358         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6359                 return BTRFS_RAID_RAID6;
6360
6361         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6362 }
6363
6364 int get_block_group_index(struct btrfs_block_group_cache *cache)
6365 {
6366         return __get_raid_index(cache->flags);
6367 }
6368
6369 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6370         [BTRFS_RAID_RAID10]     = "raid10",
6371         [BTRFS_RAID_RAID1]      = "raid1",
6372         [BTRFS_RAID_DUP]        = "dup",
6373         [BTRFS_RAID_RAID0]      = "raid0",
6374         [BTRFS_RAID_SINGLE]     = "single",
6375         [BTRFS_RAID_RAID5]      = "raid5",
6376         [BTRFS_RAID_RAID6]      = "raid6",
6377 };
6378
6379 static const char *get_raid_name(enum btrfs_raid_types type)
6380 {
6381         if (type >= BTRFS_NR_RAID_TYPES)
6382                 return NULL;
6383
6384         return btrfs_raid_type_names[type];
6385 }
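
/*
 * Usage sketch (added; not in the original source): mapping a block
 * group's flags to its list index and a printable profile name:
 *
 *	int index = get_block_group_index(cache);
 *	const char *name = get_raid_name(index);	(e.g. "raid1")
 *
 * get_raid_name() returns NULL for an out-of-range type, so check the
 * result before printing it.
 */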
6386
6387 enum btrfs_loop_type {
6388         LOOP_CACHING_NOWAIT = 0,        /* only use cached block groups */
6389         LOOP_CACHING_WAIT = 1,          /* wait for caching to progress */
6390         LOOP_ALLOC_CHUNK = 2,           /* allocate a new chunk and retry */
6391         LOOP_NO_EMPTY_SIZE = 3,         /* last pass: no empty_size/cluster */
6392 };
6393
6394 static inline void
6395 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6396                        int delalloc)
6397 {
6398         if (delalloc)
6399                 down_read(&cache->data_rwsem);
6400 }
6401
6402 static inline void
6403 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6404                        int delalloc)
6405 {
6406         btrfs_get_block_group(cache);
6407         if (delalloc)
6408                 down_read(&cache->data_rwsem);
6409 }
6410
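/*
 * Descriptive note (added; not in the original source): return the block
 * group currently backing @cluster, with a reference held and, when
 * @delalloc is set, with its data_rwsem read-locked.  If the rwsem can't
 * be taken without blocking we drop the refill_lock, sleep on the rwsem,
 * and retry, because cluster->block_group may have changed while we
 * slept.
 */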
6411 static struct btrfs_block_group_cache *
6412 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6413                    struct btrfs_free_cluster *cluster,
6414                    int delalloc)
6415 {
6416         struct btrfs_block_group_cache *used_bg;
6417         bool locked = false;
6418 again:
6419         spin_lock(&cluster->refill_lock);
6420         if (locked) {
6421                 if (used_bg == cluster->block_group)
6422                         return used_bg;
6423
6424                 up_read(&used_bg->data_rwsem);
6425                 btrfs_put_block_group(used_bg);
6426         }
6427
6428         used_bg = cluster->block_group;
6429         if (!used_bg)
6430                 return NULL;
6431
6432         if (used_bg == block_group)
6433                 return used_bg;
6434
6435         btrfs_get_block_group(used_bg);
6436
6437         if (!delalloc)
6438                 return used_bg;
6439
6440         if (down_read_trylock(&used_bg->data_rwsem))
6441                 return used_bg;
6442
6443         spin_unlock(&cluster->refill_lock);
6444         down_read(&used_bg->data_rwsem);
6445         locked = true;
6446         goto again;
6447 }
6448
6449 static inline void
6450 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6451                          int delalloc)
6452 {
6453         if (delalloc)
6454                 up_read(&cache->data_rwsem);
6455         btrfs_put_block_group(cache);
6456 }
6457
6458 /*
6459  * walks the btree of allocated extents and finds a hole of a given size.
6460  * The key ins is changed to record the hole:
6461  * ins->objectid == start position
6462  * ins->type == BTRFS_EXTENT_ITEM_KEY
6463  * ins->offset == the size of the hole.
6464  * Any available blocks before search_start are skipped.
6465  *
6466  * If there is no suitable free space, we will record the size of the
6467  * largest free space extent we saw.
6468  */
6469 static noinline int find_free_extent(struct btrfs_root *orig_root,
6470                                      u64 num_bytes, u64 empty_size,
6471                                      u64 hint_byte, struct btrfs_key *ins,
6472                                      u64 flags, int delalloc)
6473 {
6474         int ret = 0;
6475         struct btrfs_root *root = orig_root->fs_info->extent_root;
6476         struct btrfs_free_cluster *last_ptr = NULL;
6477         struct btrfs_block_group_cache *block_group = NULL;
6478         u64 search_start = 0;
6479         u64 max_extent_size = 0;
6480         int empty_cluster = 2 * 1024 * 1024;
6481         struct btrfs_space_info *space_info;
6482         int loop = 0;
6483         int index = __get_raid_index(flags);
6484         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6485                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6486         bool failed_cluster_refill = false;
6487         bool failed_alloc = false;
6488         bool use_cluster = true;
6489         bool have_caching_bg = false;
6490
6491         WARN_ON(num_bytes < root->sectorsize);
6492         ins->type = BTRFS_EXTENT_ITEM_KEY;
6493         ins->objectid = 0;
6494         ins->offset = 0;
6495
6496         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6497
6498         space_info = __find_space_info(root->fs_info, flags);
6499         if (!space_info) {
6500                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6501                 return -ENOSPC;
6502         }
6503
6504         /*
6505          * If the space info is for both data and metadata it means we have a
6506          * small filesystem and we can't use the clustering stuff.
6507          */
6508         if (btrfs_mixed_space_info(space_info))
6509                 use_cluster = false;
6510
6511         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6512                 last_ptr = &root->fs_info->meta_alloc_cluster;
6513                 if (!btrfs_test_opt(root, SSD))
6514                         empty_cluster = 64 * 1024;
6515         }
6516
6517         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6518             btrfs_test_opt(root, SSD)) {
6519                 last_ptr = &root->fs_info->data_alloc_cluster;
6520         }
6521
6522         if (last_ptr) {
6523                 spin_lock(&last_ptr->lock);
6524                 if (last_ptr->block_group)
6525                         hint_byte = last_ptr->window_start;
6526                 spin_unlock(&last_ptr->lock);
6527         }
6528
6529         search_start = max(search_start, first_logical_byte(root, 0));
6530         search_start = max(search_start, hint_byte);
6531
6532         if (!last_ptr)
6533                 empty_cluster = 0;
6534
6535         if (search_start == hint_byte) {
6536                 block_group = btrfs_lookup_block_group(root->fs_info,
6537                                                        search_start);
6538                 /*
6539                  * we don't want to use the block group if it doesn't match our
6540                  * allocation bits, or if it's not cached.
6541                  *
6542                  * However if we are re-searching with an ideal block group
6543                  * picked out then we don't care that the block group is cached.
6544                  */
6545                 if (block_group && block_group_bits(block_group, flags) &&
6546                     block_group->cached != BTRFS_CACHE_NO) {
6547                         down_read(&space_info->groups_sem);
6548                         if (list_empty(&block_group->list) ||
6549                             block_group->ro) {
6550                                 /*
6551                                  * someone is removing this block group,
6552                                  * someone is removing this block group;
6553                                  * we can't jump to the have_block_group
6554                                  * label because our list pointers are not
6555                                  */
6556                                 btrfs_put_block_group(block_group);
6557                                 up_read(&space_info->groups_sem);
6558                         } else {
6559                                 index = get_block_group_index(block_group);
6560                                 btrfs_lock_block_group(block_group, delalloc);
6561                                 goto have_block_group;
6562                         }
6563                 } else if (block_group) {
6564                         btrfs_put_block_group(block_group);
6565                 }
6566         }
6567 search:
6568         have_caching_bg = false;
6569         down_read(&space_info->groups_sem);
6570         list_for_each_entry(block_group, &space_info->block_groups[index],
6571                             list) {
6572                 u64 offset;
6573                 int cached;
6574
6575                 btrfs_grab_block_group(block_group, delalloc);
6576                 search_start = block_group->key.objectid;
6577
6578                 /*
6579                  * this can happen if we end up cycling through all the
6580                  * raid types, but we want to make sure we only allocate
6581                  * for the proper type.
6582                  */
6583                 if (!block_group_bits(block_group, flags)) {
6584                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6585                                     BTRFS_BLOCK_GROUP_RAID1 |
6586                                     BTRFS_BLOCK_GROUP_RAID5 |
6587                                     BTRFS_BLOCK_GROUP_RAID6 |
6588                                     BTRFS_BLOCK_GROUP_RAID10;
6589
6590                         /*
6591                          * if they asked for extra copies and this block group
6592                          * doesn't provide them, bail.  This does allow us to
6593                          * fill raid0 from raid1.
6594                          */
6595                         if ((flags & extra) && !(block_group->flags & extra))
6596                                 goto loop;
6597                 }
6598
6599 have_block_group:
6600                 cached = block_group_cache_done(block_group);
6601                 if (unlikely(!cached)) {
6602                         ret = cache_block_group(block_group, 0);
6603                         BUG_ON(ret < 0);
6604                         ret = 0;
6605                 }
6606
6607                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6608                         goto loop;
6609                 if (unlikely(block_group->ro))
6610                         goto loop;
6611
6612                 /*
6613                  * Ok we want to try and use the cluster allocator, so
6614                  * let's look there
6615                  */
6616                 if (last_ptr) {
6617                         struct btrfs_block_group_cache *used_block_group;
6618                         unsigned long aligned_cluster;
6619                         /*
6620                          * the refill lock keeps out other
6621                          * people trying to start a new cluster
6622                          */
6623                         used_block_group = btrfs_lock_cluster(block_group,
6624                                                               last_ptr,
6625                                                               delalloc);
6626                         if (!used_block_group)
6627                                 goto refill_cluster;
6628
6629                         if (used_block_group != block_group &&
6630                             (used_block_group->ro ||
6631                              !block_group_bits(used_block_group, flags)))
6632                                 goto release_cluster;
6633
6634                         offset = btrfs_alloc_from_cluster(used_block_group,
6635                                                 last_ptr,
6636                                                 num_bytes,
6637                                                 used_block_group->key.objectid,
6638                                                 &max_extent_size);
6639                         if (offset) {
6640                                 /* we have a block, we're done */
6641                                 spin_unlock(&last_ptr->refill_lock);
6642                                 trace_btrfs_reserve_extent_cluster(root,
6643                                                 used_block_group,
6644                                                 search_start, num_bytes);
6645                                 if (used_block_group != block_group) {
6646                                         btrfs_release_block_group(block_group,
6647                                                                   delalloc);
6648                                         block_group = used_block_group;
6649                                 }
6650                                 goto checks;
6651                         }
6652
6653                         WARN_ON(last_ptr->block_group != used_block_group);
6654 release_cluster:
6655                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6656                          * set up a new cluster, so let's just skip it
6657                          * and let the allocator find whatever block
6658                          * it can find.  If we reach this point, we
6659                          * will have tried the cluster allocator
6660                          * plenty of times and not have found
6661                          * anything, so we are likely way too
6662                          * fragmented for the clustering stuff to find
6663                          * anything.
6664                          *
6665                          * However, if the cluster is taken from the
6666                          * current block group, release the cluster
6667                          * first, so that we stand a better chance of
6668                          * succeeding in the unclustered
6669                          * allocation.  */
6670                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6671                             used_block_group != block_group) {
6672                                 spin_unlock(&last_ptr->refill_lock);
6673                                 btrfs_release_block_group(used_block_group,
6674                                                           delalloc);
6675                                 goto unclustered_alloc;
6676                         }
6677
6678                         /*
6679                          * this cluster didn't work out, free it and
6680                          * start over
6681                          */
6682                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6683
6684                         if (used_block_group != block_group)
6685                                 btrfs_release_block_group(used_block_group,
6686                                                           delalloc);
6687 refill_cluster:
6688                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6689                                 spin_unlock(&last_ptr->refill_lock);
6690                                 goto unclustered_alloc;
6691                         }
6692
6693                         aligned_cluster = max_t(unsigned long,
6694                                                 empty_cluster + empty_size,
6695                                               block_group->full_stripe_len);
6696
6697                         /* allocate a cluster in this block group */
6698                         ret = btrfs_find_space_cluster(root, block_group,
6699                                                        last_ptr, search_start,
6700                                                        num_bytes,
6701                                                        aligned_cluster);
6702                         if (ret == 0) {
6703                                 /*
6704                                  * now pull our allocation out of this
6705                                  * cluster
6706                                  */
6707                                 offset = btrfs_alloc_from_cluster(block_group,
6708                                                         last_ptr,
6709                                                         num_bytes,
6710                                                         search_start,
6711                                                         &max_extent_size);
6712                                 if (offset) {
6713                                         /* we found one, proceed */
6714                                         spin_unlock(&last_ptr->refill_lock);
6715                                         trace_btrfs_reserve_extent_cluster(root,
6716                                                 block_group, search_start,
6717                                                 num_bytes);
6718                                         goto checks;
6719                                 }
6720                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6721                                    && !failed_cluster_refill) {
6722                                 spin_unlock(&last_ptr->refill_lock);
6723
6724                                 failed_cluster_refill = true;
6725                                 wait_block_group_cache_progress(block_group,
6726                                        num_bytes + empty_cluster + empty_size);
6727                                 goto have_block_group;
6728                         }
6729
6730                         /*
6731                          * at this point we either didn't find a cluster
6732                          * or we weren't able to allocate a block from our
6733                          * cluster.  Free the cluster we've been trying
6734                          * to use, and go to the next block group
6735                          */
6736                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6737                         spin_unlock(&last_ptr->refill_lock);
6738                         goto loop;
6739                 }
6740
6741 unclustered_alloc:
6742                 spin_lock(&block_group->free_space_ctl->tree_lock);
6743                 if (cached &&
6744                     block_group->free_space_ctl->free_space <
6745                     num_bytes + empty_cluster + empty_size) {
6746                         if (block_group->free_space_ctl->free_space >
6747                             max_extent_size)
6748                                 max_extent_size =
6749                                         block_group->free_space_ctl->free_space;
6750                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6751                         goto loop;
6752                 }
6753                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6754
6755                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6756                                                     num_bytes, empty_size,
6757                                                     &max_extent_size);
6758                 /*
6759                  * If we didn't find a chunk, and we haven't failed on this
6760                  * block group before, and this block group is in the middle of
6761                  * caching and we are ok with waiting, then go ahead and wait
6762                  * for progress to be made, and set failed_alloc to true.
6763                  *
6764                  * If failed_alloc is true then we've already waited on this
6765                  * block group once and should move on to the next block group.
6766                  */
6767                 if (!offset && !failed_alloc && !cached &&
6768                     loop > LOOP_CACHING_NOWAIT) {
6769                         wait_block_group_cache_progress(block_group,
6770                                                 num_bytes + empty_size);
6771                         failed_alloc = true;
6772                         goto have_block_group;
6773                 } else if (!offset) {
6774                         if (!cached)
6775                                 have_caching_bg = true;
6776                         goto loop;
6777                 }
6778 checks:
6779                 search_start = ALIGN(offset, root->stripesize);
6780
6781                 /* move on to the next group */
6782                 if (search_start + num_bytes >
6783                     block_group->key.objectid + block_group->key.offset) {
6784                         btrfs_add_free_space(block_group, offset, num_bytes);
6785                         goto loop;
6786                 }
6787
6788                 if (offset < search_start)
6789                         btrfs_add_free_space(block_group, offset,
6790                                              search_start - offset);
6791                 BUG_ON(offset > search_start);
6792
6793                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6794                                                   alloc_type, delalloc);
6795                 if (ret == -EAGAIN) {
6796                         btrfs_add_free_space(block_group, offset, num_bytes);
6797                         goto loop;
6798                 }
6799
6800                 /* we are all good, let's return */
6801                 ins->objectid = search_start;
6802                 ins->offset = num_bytes;
6803
6804                 trace_btrfs_reserve_extent(orig_root, block_group,
6805                                            search_start, num_bytes);
6806                 btrfs_release_block_group(block_group, delalloc);
6807                 break;
6808 loop:
6809                 failed_cluster_refill = false;
6810                 failed_alloc = false;
6811                 BUG_ON(index != get_block_group_index(block_group));
6812                 btrfs_release_block_group(block_group, delalloc);
6813         }
6814         up_read(&space_info->groups_sem);
6815
6816         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6817                 goto search;
6818
6819         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6820                 goto search;
6821
6822         /*
6823          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6824          *                      caching kthreads as we move along
6825          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6826          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6827          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6828          *                      again
6829          */
6830         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6831                 index = 0;
6832                 loop++;
6833                 if (loop == LOOP_ALLOC_CHUNK) {
6834                         struct btrfs_trans_handle *trans;
6835                         int exist = 0;
6836
6837                         trans = current->journal_info;
6838                         if (trans)
6839                                 exist = 1;
6840                         else
6841                                 trans = btrfs_join_transaction(root);
6842
6843                         if (IS_ERR(trans)) {
6844                                 ret = PTR_ERR(trans);
6845                                 goto out;
6846                         }
6847
6848                         ret = do_chunk_alloc(trans, root, flags,
6849                                              CHUNK_ALLOC_FORCE);
6850                         /*
6851                          * Do not bail out on ENOSPC since we
6852                          * can do more things.
6853                          */
6854                         if (ret < 0 && ret != -ENOSPC)
6855                                 btrfs_abort_transaction(trans,
6856                                                         root, ret);
6857                         else
6858                                 ret = 0;
6859                         if (!exist)
6860                                 btrfs_end_transaction(trans, root);
6861                         if (ret)
6862                                 goto out;
6863                 }
6864
6865                 if (loop == LOOP_NO_EMPTY_SIZE) {
6866                         empty_size = 0;
6867                         empty_cluster = 0;
6868                 }
6869
6870                 goto search;
6871         } else if (!ins->objectid) {
6872                 ret = -ENOSPC;
6873         } else if (ins->objectid) {
6874                 ret = 0;
6875         }
6876 out:
6877         if (ret == -ENOSPC)
6878                 ins->offset = max_extent_size;
6879         return ret;
6880 }
6881
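/*
 * Print the usage counters for a space_info (and, if asked, for each of
 * its block groups) to the kernel log.  This is a debugging aid for
 * ENOSPC situations; the numbers are sampled under the relevant locks
 * but can be stale by the time they are read.
 */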
6882 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6883                             int dump_block_groups)
6884 {
6885         struct btrfs_block_group_cache *cache;
6886         int index = 0;
6887
6888         spin_lock(&info->lock);
6889         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6890                info->flags,
6891                info->total_bytes - info->bytes_used - info->bytes_pinned -
6892                info->bytes_reserved - info->bytes_readonly,
6893                (info->full) ? "" : "not ");
6894         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6895                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6896                info->total_bytes, info->bytes_used, info->bytes_pinned,
6897                info->bytes_reserved, info->bytes_may_use,
6898                info->bytes_readonly);
6899         spin_unlock(&info->lock);
6900
6901         if (!dump_block_groups)
6902                 return;
6903
6904         down_read(&info->groups_sem);
6905 again:
6906         list_for_each_entry(cache, &info->block_groups[index], list) {
6907                 spin_lock(&cache->lock);
6908                 printk(KERN_INFO "BTRFS: "
6909                            "block group %llu has %llu bytes, "
6910                            "%llu used %llu pinned %llu reserved %s\n",
6911                        cache->key.objectid, cache->key.offset,
6912                        btrfs_block_group_used(&cache->item), cache->pinned,
6913                        cache->reserved, cache->ro ? "[readonly]" : "");
6914                 btrfs_dump_free_space(cache, bytes);
6915                 spin_unlock(&cache->lock);
6916         }
6917         if (++index < BTRFS_NR_RAID_TYPES)
6918                 goto again;
6919         up_read(&info->groups_sem);
6920 }
6921
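/*
 * Reserve an extent of at least min_alloc_size bytes.  On ENOSPC the
 * request is halved (rounded down to a sectorsize multiple, but never
 * below min_alloc_size) and retried, so on success ins->offset may be
 * smaller than the num_bytes originally asked for.
 */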
6922 int btrfs_reserve_extent(struct btrfs_root *root,
6923                          u64 num_bytes, u64 min_alloc_size,
6924                          u64 empty_size, u64 hint_byte,
6925                          struct btrfs_key *ins, int is_data, int delalloc)
6926 {
6927         bool final_tried = false;
6928         u64 flags;
6929         int ret;
6930
6931         flags = btrfs_get_alloc_profile(root, is_data);
6932 again:
6933         WARN_ON(num_bytes < root->sectorsize);
6934         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6935                                flags, delalloc);
6936
6937         if (ret == -ENOSPC) {
6938                 if (!final_tried && ins->offset) {
6939                         num_bytes = min(num_bytes >> 1, ins->offset);
6940                         num_bytes = round_down(num_bytes, root->sectorsize);
6941                         num_bytes = max(num_bytes, min_alloc_size);
6942                         if (num_bytes == min_alloc_size)
6943                                 final_tried = true;
6944                         goto again;
6945                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6946                         struct btrfs_space_info *sinfo;
6947
6948                         sinfo = __find_space_info(root->fs_info, flags);
6949                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6950                                 flags, num_bytes);
6951                         if (sinfo)
6952                                 dump_space_info(sinfo, num_bytes, 1);
6953                 }
6954         }
6955
6956         return ret;
6957 }
6958
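/*
 * Return a reserved extent to its block group.  When @pin is set the
 * range stays pinned until the transaction commits; otherwise it goes
 * straight back to the free space cache (optionally discarding it
 * first when the DISCARD mount option is enabled).
 */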
6959 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6960                                         u64 start, u64 len,
6961                                         int pin, int delalloc)
6962 {
6963         struct btrfs_block_group_cache *cache;
6964         int ret = 0;
6965
6966         cache = btrfs_lookup_block_group(root->fs_info, start);
6967         if (!cache) {
6968                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6969                         start);
6970                 return -ENOSPC;
6971         }
6972
6973         if (pin)
6974                 pin_down_extent(root, cache, start, len, 1);
6975         else {
6976                 if (btrfs_test_opt(root, DISCARD))
6977                         ret = btrfs_discard_extent(root, start, len, NULL);
6978                 btrfs_add_free_space(cache, start, len);
6979                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6980         }
6981         btrfs_put_block_group(cache);
6982
6983         trace_btrfs_reserved_extent_free(root, start, len);
6984
6985         return ret;
6986 }
6987
6988 int btrfs_free_reserved_extent(struct btrfs_root *root,
6989                                u64 start, u64 len, int delalloc)
6990 {
6991         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6992 }
6993
6994 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6995                                        u64 start, u64 len)
6996 {
6997         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6998 }
6999
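/*
 * Insert the extent item and its inline backref for a newly allocated
 * data extent.  A shared data ref is used when @parent is set, an
 * extent data ref (root/owner/offset) otherwise.
 */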
7000 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7001                                       struct btrfs_root *root,
7002                                       u64 parent, u64 root_objectid,
7003                                       u64 flags, u64 owner, u64 offset,
7004                                       struct btrfs_key *ins, int ref_mod)
7005 {
7006         int ret;
7007         struct btrfs_fs_info *fs_info = root->fs_info;
7008         struct btrfs_extent_item *extent_item;
7009         struct btrfs_extent_inline_ref *iref;
7010         struct btrfs_path *path;
7011         struct extent_buffer *leaf;
7012         int type;
7013         u32 size;
7014
7015         if (parent > 0)
7016                 type = BTRFS_SHARED_DATA_REF_KEY;
7017         else
7018                 type = BTRFS_EXTENT_DATA_REF_KEY;
7019
7020         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7021
7022         path = btrfs_alloc_path();
7023         if (!path)
7024                 return -ENOMEM;
7025
7026         path->leave_spinning = 1;
7027         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7028                                       ins, size);
7029         if (ret) {
7030                 btrfs_free_path(path);
7031                 return ret;
7032         }
7033
7034         leaf = path->nodes[0];
7035         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7036                                      struct btrfs_extent_item);
7037         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7038         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7039         btrfs_set_extent_flags(leaf, extent_item,
7040                                flags | BTRFS_EXTENT_FLAG_DATA);
7041
7042         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7043         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7044         if (parent > 0) {
7045                 struct btrfs_shared_data_ref *ref;
7046                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7047                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7048                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7049         } else {
7050                 struct btrfs_extent_data_ref *ref;
7051                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7052                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7053                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7054                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7055                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7056         }
7057
7058         btrfs_mark_buffer_dirty(path->nodes[0]);
7059         btrfs_free_path(path);
7060
7061         /* Always set parent to 0 here since it's exclusive anyway. */
7062         ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7063                                       ins->objectid, ins->offset,
7064                                       BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7065         if (ret)
7066                 return ret;
7067
7068         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7069         if (ret) { /* -ENOENT, logic error */
7070                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7071                         ins->objectid, ins->offset);
7072                 BUG();
7073         }
7074         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7075         return ret;
7076 }
7077
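/*
 * Insert the extent item for a newly allocated tree block.  With the
 * SKINNY_METADATA feature the key already encodes the level, so the
 * separate tree_block_info is omitted; otherwise it is written along
 * with the block's first key and level.
 */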
7078 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7079                                      struct btrfs_root *root,
7080                                      u64 parent, u64 root_objectid,
7081                                      u64 flags, struct btrfs_disk_key *key,
7082                                      int level, struct btrfs_key *ins,
7083                                      int no_quota)
7084 {
7085         int ret;
7086         struct btrfs_fs_info *fs_info = root->fs_info;
7087         struct btrfs_extent_item *extent_item;
7088         struct btrfs_tree_block_info *block_info;
7089         struct btrfs_extent_inline_ref *iref;
7090         struct btrfs_path *path;
7091         struct extent_buffer *leaf;
7092         u32 size = sizeof(*extent_item) + sizeof(*iref);
7093         u64 num_bytes = ins->offset;
7094         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7095                                                  SKINNY_METADATA);
7096
7097         if (!skinny_metadata)
7098                 size += sizeof(*block_info);
7099
7100         path = btrfs_alloc_path();
7101         if (!path) {
7102                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7103                                                    root->nodesize);
7104                 return -ENOMEM;
7105         }
7106
7107         path->leave_spinning = 1;
7108         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7109                                       ins, size);
7110         if (ret) {
7111                 btrfs_free_path(path);
7112                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7113                                                    root->nodesize);
7114                 return ret;
7115         }
7116
7117         leaf = path->nodes[0];
7118         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7119                                      struct btrfs_extent_item);
7120         btrfs_set_extent_refs(leaf, extent_item, 1);
7121         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7122         btrfs_set_extent_flags(leaf, extent_item,
7123                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7124
7125         if (skinny_metadata) {
7126                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7127                 num_bytes = root->nodesize;
7128         } else {
7129                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7130                 btrfs_set_tree_block_key(leaf, block_info, key);
7131                 btrfs_set_tree_block_level(leaf, block_info, level);
7132                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7133         }
7134
7135         if (parent > 0) {
7136                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7137                 btrfs_set_extent_inline_ref_type(leaf, iref,
7138                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7139                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7140         } else {
7141                 btrfs_set_extent_inline_ref_type(leaf, iref,
7142                                                  BTRFS_TREE_BLOCK_REF_KEY);
7143                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7144         }
7145
7146         btrfs_mark_buffer_dirty(leaf);
7147         btrfs_free_path(path);
7148
7149         if (!no_quota) {
7150                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7151                                               ins->objectid, num_bytes,
7152                                               BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7153                 if (ret)
7154                         return ret;
7155         }
7156
7157         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7158                                  1);
7159         if (ret) { /* -ENOENT, logic error */
7160                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7161                         ins->objectid, ins->offset);
7162                 BUG();
7163         }
7164
7165         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7166         return ret;
7167 }
7168
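/*
 * Queue a delayed ref for a newly allocated data extent.  The actual
 * extent item insertion happens later, when the delayed refs are run.
 */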
7169 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7170                                      struct btrfs_root *root,
7171                                      u64 root_objectid, u64 owner,
7172                                      u64 offset, struct btrfs_key *ins)
7173 {
7174         int ret;
7175
7176         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7177
7178         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7179                                          ins->offset, 0,
7180                                          root_objectid, owner, offset,
7181                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7182         return ret;
7183 }
7184
7185 /*
7186  * this is used by the tree logging recovery code.  It records that
7187  * an extent has been allocated and makes sure to clear the free
7188  * space cache bits as well
7189  */
7190 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7191                                    struct btrfs_root *root,
7192                                    u64 root_objectid, u64 owner, u64 offset,
7193                                    struct btrfs_key *ins)
7194 {
7195         int ret;
7196         struct btrfs_block_group_cache *block_group;
7197
7198         /*
7199          * Mixed block groups have their extents excluded before the log is
7200          * processed, so we only need to do the exclude dance on a non-mixed fs.
7201          */
7202         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7203                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7204                 if (ret)
7205                         return ret;
7206         }
7207
7208         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7209         if (!block_group)
7210                 return -EINVAL;
7211
7212         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7213                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7214         BUG_ON(ret); /* logic error */
7215         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7216                                          0, owner, offset, ins, 1);
7217         btrfs_put_block_group(block_group);
7218         return ret;
7219 }
7220
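/*
 * Initialize a freshly allocated tree block: stamp it with the current
 * transaction's generation and mark it dirty in the appropriate extent
 * io tree (the log root's dirty_log_pages for log tree blocks, the
 * transaction's dirty_pages otherwise).  The buffer is returned locked.
 */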
7221 static struct extent_buffer *
7222 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7223                       u64 bytenr, int level)
7224 {
7225         struct extent_buffer *buf;
7226
7227         buf = btrfs_find_create_tree_block(root, bytenr);
7228         if (!buf)
7229                 return ERR_PTR(-ENOMEM);
7230         btrfs_set_header_generation(buf, trans->transid);
7231         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7232         btrfs_tree_lock(buf);
7233         clean_tree_block(trans, root->fs_info, buf);
7234         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7235
7236         btrfs_set_lock_blocking(buf);
7237         btrfs_set_buffer_uptodate(buf);
7238
7239         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7240                 buf->log_index = root->log_transid % 2;
7241                 /*
7242                  * we allow two log transactions at a time, so use different
7243                  * EXTENT bits to differentiate dirty pages.
7244                  */
7245                 if (buf->log_index == 0)
7246                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7247                                         buf->start + buf->len - 1, GFP_NOFS);
7248                 else
7249                         set_extent_new(&root->dirty_log_pages, buf->start,
7250                                         buf->start + buf->len - 1, GFP_NOFS);
7251         } else {
7252                 buf->log_index = -1;
7253                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7254                          buf->start + buf->len - 1, GFP_NOFS);
7255         }
7256         trans->blocks_used++;
7257         /* this returns a buffer locked for blocking */
7258         return buf;
7259 }
7260
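/*
 * Pick the block reservation that should back a new tree block and take
 * @blocksize bytes from it.  If that fails, fall back to reserving
 * fresh metadata space (without flushing) and, as a last resort, to
 * taking bytes straight from the global reserve when both draw from
 * the same space_info.
 */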
7261 static struct btrfs_block_rsv *
7262 use_block_rsv(struct btrfs_trans_handle *trans,
7263               struct btrfs_root *root, u32 blocksize)
7264 {
7265         struct btrfs_block_rsv *block_rsv;
7266         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7267         int ret;
7268         bool global_updated = false;
7269
7270         block_rsv = get_block_rsv(trans, root);
7271
7272         if (unlikely(block_rsv->size == 0))
7273                 goto try_reserve;
7274 again:
7275         ret = block_rsv_use_bytes(block_rsv, blocksize);
7276         if (!ret)
7277                 return block_rsv;
7278
7279         if (block_rsv->failfast)
7280                 return ERR_PTR(ret);
7281
7282         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7283                 global_updated = true;
7284                 update_global_block_rsv(root->fs_info);
7285                 goto again;
7286         }
7287
7288         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7289                 static DEFINE_RATELIMIT_STATE(_rs,
7290                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7291                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7292                 if (__ratelimit(&_rs))
7293                         WARN(1, KERN_DEBUG
7294                                 "BTRFS: block rsv returned %d\n", ret);
7295         }
7296 try_reserve:
7297         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7298                                      BTRFS_RESERVE_NO_FLUSH);
7299         if (!ret)
7300                 return block_rsv;
7301         /*
7302          * If we couldn't reserve metadata bytes, try to use some from
7303          * the global reserve, provided this reservation draws from the
7304          * same space_info as the global reserve.
7305          */
7306         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7307             block_rsv->space_info == global_rsv->space_info) {
7308                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7309                 if (!ret)
7310                         return global_rsv;
7311         }
7312         return ERR_PTR(ret);
7313 }
7314
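/* Give back bytes taken by use_block_rsv() but never used. */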
7315 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7316                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7317 {
7318         block_rsv_add_bytes(block_rsv, blocksize, 0);
7319         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7320 }
7321
7322 /*
7323  * finds a free extent and does all the dirty work required for allocation.
7324  * returns the key for the extent through ins, and a locked tree buffer
7325  * for the first block of the extent.
7326  *
7327  * returns the tree buffer or an ERR_PTR on failure.
7328  */
7329 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7330                                         struct btrfs_root *root,
7331                                         u64 parent, u64 root_objectid,
7332                                         struct btrfs_disk_key *key, int level,
7333                                         u64 hint, u64 empty_size)
7334 {
7335         struct btrfs_key ins;
7336         struct btrfs_block_rsv *block_rsv;
7337         struct extent_buffer *buf;
7338         u64 flags = 0;
7339         int ret;
7340         u32 blocksize = root->nodesize;
7341         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7342                                                  SKINNY_METADATA);
7343
7344         if (btrfs_test_is_dummy_root(root)) {
7345                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7346                                             level);
7347                 if (!IS_ERR(buf))
7348                         root->alloc_bytenr += blocksize;
7349                 return buf;
7350         }
7351
7352         block_rsv = use_block_rsv(trans, root, blocksize);
7353         if (IS_ERR(block_rsv))
7354                 return ERR_CAST(block_rsv);
7355
7356         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7357                                    empty_size, hint, &ins, 0, 0);
7358         if (ret) {
7359                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7360                 return ERR_PTR(ret);
7361         }
7362
7363         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7364         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7365
7366         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7367                 if (parent == 0)
7368                         parent = ins.objectid;
7369                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7370         } else
7371                 BUG_ON(parent > 0);
7372
7373         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7374                 struct btrfs_delayed_extent_op *extent_op;
7375                 extent_op = btrfs_alloc_delayed_extent_op();
7376                 BUG_ON(!extent_op); /* -ENOMEM */
7377                 if (key)
7378                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7379                 else
7380                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7381                 extent_op->flags_to_set = flags;
7382                 if (skinny_metadata)
7383                         extent_op->update_key = 0;
7384                 else
7385                         extent_op->update_key = 1;
7386                 extent_op->update_flags = 1;
7387                 extent_op->is_data = 0;
7388                 extent_op->level = level;
7389
7390                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7391                                         ins.objectid,
7392                                         ins.offset, parent, root_objectid,
7393                                         level, BTRFS_ADD_DELAYED_EXTENT,
7394                                         extent_op, 0);
7395                 BUG_ON(ret); /* -ENOMEM */
7396         }
7397         return buf;
7398 }
7399
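/*
 * State for walking a tree while dropping a snapshot or subtree.  The
 * walk runs in two stages: DROP_REFERENCE drops our ref on each block,
 * switching to UPDATE_BACKREF for any subtree that is still shared and
 * needs its backrefs converted before it can be dropped.
 */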
7400 struct walk_control {
7401         u64 refs[BTRFS_MAX_LEVEL];
7402         u64 flags[BTRFS_MAX_LEVEL];
7403         struct btrfs_key update_progress;
7404         int stage;
7405         int level;
7406         int shared_level;
7407         int update_ref;
7408         int keep_locks;
7409         int reada_slot;
7410         int reada_count;
7411         int for_reloc;
7412 };
7413
7414 #define DROP_REFERENCE  1
7415 #define UPDATE_BACKREF  2
7416
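/*
 * Read ahead the children of the node we are walking.  reada_count
 * adapts: it shrinks by a third when the walk has not yet progressed
 * past the previous readahead window, and grows by half when it has.
 */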
7417 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7418                                      struct btrfs_root *root,
7419                                      struct walk_control *wc,
7420                                      struct btrfs_path *path)
7421 {
7422         u64 bytenr;
7423         u64 generation;
7424         u64 refs;
7425         u64 flags;
7426         u32 nritems;
7427         u32 blocksize;
7428         struct btrfs_key key;
7429         struct extent_buffer *eb;
7430         int ret;
7431         int slot;
7432         int nread = 0;
7433
7434         if (path->slots[wc->level] < wc->reada_slot) {
7435                 wc->reada_count = wc->reada_count * 2 / 3;
7436                 wc->reada_count = max(wc->reada_count, 2);
7437         } else {
7438                 wc->reada_count = wc->reada_count * 3 / 2;
7439                 wc->reada_count = min_t(int, wc->reada_count,
7440                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7441         }
7442
7443         eb = path->nodes[wc->level];
7444         nritems = btrfs_header_nritems(eb);
7445         blocksize = root->nodesize;
7446
7447         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7448                 if (nread >= wc->reada_count)
7449                         break;
7450
7451                 cond_resched();
7452                 bytenr = btrfs_node_blockptr(eb, slot);
7453                 generation = btrfs_node_ptr_generation(eb, slot);
7454
7455                 if (slot == path->slots[wc->level])
7456                         goto reada;
7457
7458                 if (wc->stage == UPDATE_BACKREF &&
7459                     generation <= root->root_key.offset)
7460                         continue;
7461
7462                 /* We don't lock the tree block, it's OK to be racy here */
7463                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7464                                                wc->level - 1, 1, &refs,
7465                                                &flags);
7466                 /* We don't care about errors in readahead. */
7467                 if (ret < 0)
7468                         continue;
7469                 BUG_ON(refs == 0);
7470
7471                 if (wc->stage == DROP_REFERENCE) {
7472                         if (refs == 1)
7473                                 goto reada;
7474
7475                         if (wc->level == 1 &&
7476                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7477                                 continue;
7478                         if (!wc->update_ref ||
7479                             generation <= root->root_key.offset)
7480                                 continue;
7481                         btrfs_node_key_to_cpu(eb, &key, slot);
7482                         ret = btrfs_comp_cpu_keys(&key,
7483                                                   &wc->update_progress);
7484                         if (ret < 0)
7485                                 continue;
7486                 } else {
7487                         if (wc->level == 1 &&
7488                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7489                                 continue;
7490                 }
7491 reada:
7492                 readahead_tree_block(root, bytenr);
7493                 nread++;
7494         }
7495         wc->reada_slot = slot;
7496 }
7497
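/*
 * Queue a qgroup SUB_SUBTREE operation for every on-disk file extent in
 * a leaf.  Inline extents and holes occupy no separately allocated
 * space and are skipped.
 */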
7498 static int account_leaf_items(struct btrfs_trans_handle *trans,
7499                               struct btrfs_root *root,
7500                               struct extent_buffer *eb)
7501 {
7502         int nr = btrfs_header_nritems(eb);
7503         int i, extent_type, ret;
7504         struct btrfs_key key;
7505         struct btrfs_file_extent_item *fi;
7506         u64 bytenr, num_bytes;
7507
7508         for (i = 0; i < nr; i++) {
7509                 btrfs_item_key_to_cpu(eb, &key, i);
7510
7511                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7512                         continue;
7513
7514                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7515                 /* filter out non-qgroup-accountable extents */
7516                 extent_type = btrfs_file_extent_type(eb, fi);
7517
7518                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7519                         continue;
7520
7521                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7522                 if (!bytenr)
7523                         continue;
7524
7525                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7526
7527                 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7528                                               root->objectid,
7529                                               bytenr, num_bytes,
7530                                               BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7531                 if (ret)
7532                         return ret;
7533         }
7534         return 0;
7535 }
7536
7537 /*
7538  * Walk up the tree from the bottom, freeing leaves and any interior
7539  * nodes which have had all slots visited. If a node (leaf or
7540  * interior) is freed, the node above it will have its slot
7541  * incremented. The root node will never be freed.
7542  *
7543  * At the end of this function, we should have a path which has all
7544  * slots incremented to the next position for a search. If we need to
7545  * read a new node it will be NULL and the node above it will have the
7546  * correct slot selected for a later read.
7547  *
7548  * If we increment the root nodes slot counter past the number of
7549  * elements, 1 is returned to signal completion of the search.
7550  */
7551 static int adjust_slots_upwards(struct btrfs_root *root,
7552                                 struct btrfs_path *path, int root_level)
7553 {
7554         int level = 0;
7555         int nr, slot;
7556         struct extent_buffer *eb;
7557
7558         if (root_level == 0)
7559                 return 1;
7560
7561         while (level <= root_level) {
7562                 eb = path->nodes[level];
7563                 nr = btrfs_header_nritems(eb);
7564                 path->slots[level]++;
7565                 slot = path->slots[level];
7566                 if (slot >= nr || level == 0) {
7567                         /*
7568                          * Don't free the root - we will detect this
7569                          * condition after our loop and return a
7570                          * positive value for the caller to stop walking the tree.
7571                          */
7572                         if (level != root_level) {
7573                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7574                                 path->locks[level] = 0;
7575
7576                                 free_extent_buffer(eb);
7577                                 path->nodes[level] = NULL;
7578                                 path->slots[level] = 0;
7579                         }
7580                 } else {
7581                         /*
7582                          * We have a valid slot to walk back down
7583                          * from. Stop here so caller can process these
7584                          * new nodes.
7585                          */
7586                         break;
7587                 }
7588
7589                 level++;
7590         }
7591
7592         eb = path->nodes[root_level];
7593         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7594                 return 1;
7595
7596         return 0;
7597 }
7598
7599 /*
7600  * root_eb is the subtree root and is locked before this function is called.
7601  */
7602 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7603                                   struct btrfs_root *root,
7604                                   struct extent_buffer *root_eb,
7605                                   u64 root_gen,
7606                                   int root_level)
7607 {
7608         int ret = 0;
7609         int level;
7610         struct extent_buffer *eb = root_eb;
7611         struct btrfs_path *path = NULL;
7612
7613         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7614         BUG_ON(root_eb == NULL);
7615
7616         if (!root->fs_info->quota_enabled)
7617                 return 0;
7618
7619         if (!extent_buffer_uptodate(root_eb)) {
7620                 ret = btrfs_read_buffer(root_eb, root_gen);
7621                 if (ret)
7622                         goto out;
7623         }
7624
7625         if (root_level == 0) {
7626                 ret = account_leaf_items(trans, root, root_eb);
7627                 goto out;
7628         }
7629
7630         path = btrfs_alloc_path();
7631         if (!path)
7632                 return -ENOMEM;
7633
7634         /*
7635          * Walk down the tree.  Missing extent blocks are filled in as
7636          * we go. Metadata is accounted every time we read a new
7637          * extent block.
7638          *
7639          * When we reach a leaf, we account for file extent items in it,
7640          * walk back up the tree (adjusting slot pointers as we go)
7641          * and restart the search process.
7642          */
7643         extent_buffer_get(root_eb); /* For path */
7644         path->nodes[root_level] = root_eb;
7645         path->slots[root_level] = 0;
7646         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7647 walk_down:
7648         level = root_level;
7649         while (level >= 0) {
7650                 if (path->nodes[level] == NULL) {
7651                         int parent_slot;
7652                         u64 child_gen;
7653                         u64 child_bytenr;
7654
7655                         /* We need to get child blockptr/gen from
7656                          * parent before we can read it. */
7657                         eb = path->nodes[level + 1];
7658                         parent_slot = path->slots[level + 1];
7659                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7660                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7661
7662                         eb = read_tree_block(root, child_bytenr, child_gen);
7663                         if (!eb || !extent_buffer_uptodate(eb)) {
7664                                 ret = -EIO;
7665                                 goto out;
7666                         }
7667
7668                         path->nodes[level] = eb;
7669                         path->slots[level] = 0;
7670
7671                         btrfs_tree_read_lock(eb);
7672                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7673                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7674
7675                         ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7676                                                 root->objectid,
7677                                                 child_bytenr,
7678                                                 root->nodesize,
7679                                                 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7680                                                 0);
7681                         if (ret)
7682                                 goto out;
7683
7684                 }
7685
7686                 if (level == 0) {
7687                         ret = account_leaf_items(trans, root, path->nodes[level]);
7688                         if (ret)
7689                                 goto out;
7690
7691                         /* Nonzero return here means we completed our search */
7692                         ret = adjust_slots_upwards(root, path, root_level);
7693                         if (ret)
7694                                 break;
7695
7696                         /* Restart search with new slots */
7697                         goto walk_down;
7698                 }
7699
7700                 level--;
7701         }
7702
7703         ret = 0;
7704 out:
7705         btrfs_free_path(path);
7706
7707         return ret;
7708 }
7709
7710 /*
7711  * helper to process tree block while walking down the tree.
7712  *
7713  * when wc->stage == UPDATE_BACKREF, this function updates
7714  * back refs for pointers in the block.
7715  *
7716  * NOTE: return value 1 means we should stop walking down.
7717  */
7718 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7719                                    struct btrfs_root *root,
7720                                    struct btrfs_path *path,
7721                                    struct walk_control *wc, int lookup_info)
7722 {
7723         int level = wc->level;
7724         struct extent_buffer *eb = path->nodes[level];
7725         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7726         int ret;
7727
7728         if (wc->stage == UPDATE_BACKREF &&
7729             btrfs_header_owner(eb) != root->root_key.objectid)
7730                 return 1;
7731
7732         /*
7733          * when reference count of tree block is 1, it won't increase
7734          * again. once full backref flag is set, we never clear it.
7735          */
7736         if (lookup_info &&
7737             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7738              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7739                 BUG_ON(!path->locks[level]);
7740                 ret = btrfs_lookup_extent_info(trans, root,
7741                                                eb->start, level, 1,
7742                                                &wc->refs[level],
7743                                                &wc->flags[level]);
7744                 BUG_ON(ret == -ENOMEM);
7745                 if (ret)
7746                         return ret;
7747                 BUG_ON(wc->refs[level] == 0);
7748         }
7749
7750         if (wc->stage == DROP_REFERENCE) {
7751                 if (wc->refs[level] > 1)
7752                         return 1;
7753
7754                 if (path->locks[level] && !wc->keep_locks) {
7755                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7756                         path->locks[level] = 0;
7757                 }
7758                 return 0;
7759         }
7760
7761         /* wc->stage == UPDATE_BACKREF */
7762         if (!(wc->flags[level] & flag)) {
7763                 BUG_ON(!path->locks[level]);
7764                 ret = btrfs_inc_ref(trans, root, eb, 1);
7765                 BUG_ON(ret); /* -ENOMEM */
7766                 ret = btrfs_dec_ref(trans, root, eb, 0);
7767                 BUG_ON(ret); /* -ENOMEM */
7768                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7769                                                   eb->len, flag,
7770                                                   btrfs_header_level(eb), 0);
7771                 BUG_ON(ret); /* -ENOMEM */
7772                 wc->flags[level] |= flag;
7773         }
7774
7775         /*
7776          * the block is shared by multiple trees, so it's not good to
7777          * keep the tree lock
7778          */
7779         if (path->locks[level] && level > 0) {
7780                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7781                 path->locks[level] = 0;
7782         }
7783         return 0;
7784 }
7785
7786 /*
7787  * helper to process tree block pointer.
7788  *
7789  * when wc->stage == DROP_REFERENCE, this function checks
7790  * reference count of the block pointed to. if the block
7791  * is shared and we need update back refs for the subtree
7792  * rooted at the block, this function changes wc->stage to
7793  * UPDATE_BACKREF. if the block is shared and there is no
7794  * need to update back, this function drops the reference
7795  * to the block.
7796  *
7797  * NOTE: return value 1 means we should stop walking down.
7798  */
7799 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7800                                  struct btrfs_root *root,
7801                                  struct btrfs_path *path,
7802                                  struct walk_control *wc, int *lookup_info)
7803 {
7804         u64 bytenr;
7805         u64 generation;
7806         u64 parent;
7807         u32 blocksize;
7808         struct btrfs_key key;
7809         struct extent_buffer *next;
7810         int level = wc->level;
7811         int reada = 0;
7812         int ret = 0;
7813         bool need_account = false;
7814
7815         generation = btrfs_node_ptr_generation(path->nodes[level],
7816                                                path->slots[level]);
7817         /*
7818          * if the lower level block was created before the snapshot
7819          * was created, we know there is no need to update back refs
7820          * for the subtree
7821          */
7822         if (wc->stage == UPDATE_BACKREF &&
7823             generation <= root->root_key.offset) {
7824                 *lookup_info = 1;
7825                 return 1;
7826         }
7827
7828         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7829         blocksize = root->nodesize;
7830
7831         next = btrfs_find_tree_block(root->fs_info, bytenr);
7832         if (!next) {
7833                 next = btrfs_find_create_tree_block(root, bytenr);
7834                 if (!next)
7835                         return -ENOMEM;
7836                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7837                                                level - 1);
7838                 reada = 1;
7839         }
7840         btrfs_tree_lock(next);
7841         btrfs_set_lock_blocking(next);
7842
7843         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7844                                        &wc->refs[level - 1],
7845                                        &wc->flags[level - 1]);
7846         if (ret < 0) {
7847                 btrfs_tree_unlock(next);
7848                 return ret;
7849         }
7850
7851         if (unlikely(wc->refs[level - 1] == 0)) {
7852                 btrfs_err(root->fs_info, "Missing references.");
7853                 BUG();
7854         }
7855         *lookup_info = 0;
7856
7857         if (wc->stage == DROP_REFERENCE) {
7858                 if (wc->refs[level - 1] > 1) {
7859                         need_account = true;
7860                         if (level == 1 &&
7861                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7862                                 goto skip;
7863
7864                         if (!wc->update_ref ||
7865                             generation <= root->root_key.offset)
7866                                 goto skip;
7867
7868                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7869                                               path->slots[level]);
7870                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7871                         if (ret < 0)
7872                                 goto skip;
7873
7874                         wc->stage = UPDATE_BACKREF;
7875                         wc->shared_level = level - 1;
7876                 }
7877         } else {
7878                 if (level == 1 &&
7879                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7880                         goto skip;
7881         }
7882
7883         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7884                 btrfs_tree_unlock(next);
7885                 free_extent_buffer(next);
7886                 next = NULL;
7887                 *lookup_info = 1;
7888         }
7889
7890         if (!next) {
7891                 if (reada && level == 1)
7892                         reada_walk_down(trans, root, wc, path);
7893                 next = read_tree_block(root, bytenr, generation);
7894                 if (!next || !extent_buffer_uptodate(next)) {
7895                         free_extent_buffer(next);
7896                         return -EIO;
7897                 }
7898                 btrfs_tree_lock(next);
7899                 btrfs_set_lock_blocking(next);
7900         }
7901
7902         level--;
7903         BUG_ON(level != btrfs_header_level(next));
7904         path->nodes[level] = next;
7905         path->slots[level] = 0;
7906         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7907         wc->level = level;
7908         if (wc->level == 1)
7909                 wc->reada_slot = 0;
7910         return 0;
7911 skip:
7912         wc->refs[level - 1] = 0;
7913         wc->flags[level - 1] = 0;
7914         if (wc->stage == DROP_REFERENCE) {
7915                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7916                         parent = path->nodes[level]->start;
7917                 } else {
7918                         BUG_ON(root->root_key.objectid !=
7919                                btrfs_header_owner(path->nodes[level]));
7920                         parent = 0;
7921                 }
7922
7923                 if (need_account) {
7924                         ret = account_shared_subtree(trans, root, next,
7925                                                      generation, level - 1);
7926                         if (ret) {
7927                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7928                                         "%d accounting shared subtree. Quota "
7929                                         "is out of sync, rescan required.\n",
7930                                         root->fs_info->sb->s_id, ret);
7931                         }
7932                 }
7933                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7934                                 root->root_key.objectid, level - 1, 0, 0);
7935                 BUG_ON(ret); /* -ENOMEM */
7936         }
7937         btrfs_tree_unlock(next);
7938         free_extent_buffer(next);
7939         *lookup_info = 1;
7940         return 1;
7941 }
7942
7943 /*
7944  * helper to process tree block while walking up the tree.
7945  *
7946  * when wc->stage == DROP_REFERENCE, this function drops
7947  * reference count on the block.
7948  *
7949  * when wc->stage == UPDATE_BACKREF, this function changes
7950  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7951  * to UPDATE_BACKREF previously while processing the block.
7952  *
7953  * NOTE: return value 1 means we should stop walking up.
7954  */
7955 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7956                                  struct btrfs_root *root,
7957                                  struct btrfs_path *path,
7958                                  struct walk_control *wc)
7959 {
7960         int ret;
7961         int level = wc->level;
7962         struct extent_buffer *eb = path->nodes[level];
7963         u64 parent = 0;
7964
7965         if (wc->stage == UPDATE_BACKREF) {
7966                 BUG_ON(wc->shared_level < level);
7967                 if (level < wc->shared_level)
7968                         goto out;
7969
7970                 ret = find_next_key(path, level + 1, &wc->update_progress);
7971                 if (ret > 0)
7972                         wc->update_ref = 0;
7973
7974                 wc->stage = DROP_REFERENCE;
7975                 wc->shared_level = -1;
7976                 path->slots[level] = 0;
7977
7978                 /*
7979                  * check reference count again if the block isn't locked.
7980                  * we should start walking down the tree again if the
7981                  * reference count is one.
7982                  */
7983                 if (!path->locks[level]) {
7984                         BUG_ON(level == 0);
7985                         btrfs_tree_lock(eb);
7986                         btrfs_set_lock_blocking(eb);
7987                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7988
7989                         ret = btrfs_lookup_extent_info(trans, root,
7990                                                        eb->start, level, 1,
7991                                                        &wc->refs[level],
7992                                                        &wc->flags[level]);
7993                         if (ret < 0) {
7994                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7995                                 path->locks[level] = 0;
7996                                 return ret;
7997                         }
7998                         BUG_ON(wc->refs[level] == 0);
7999                         if (wc->refs[level] == 1) {
8000                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8001                                 path->locks[level] = 0;
8002                                 return 1;
8003                         }
8004                 }
8005         }
8006
8007         /* wc->stage == DROP_REFERENCE */
8008         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8009
8010         if (wc->refs[level] == 1) {
8011                 if (level == 0) {
8012                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8013                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8014                         else
8015                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8016                         BUG_ON(ret); /* -ENOMEM */
8017                         ret = account_leaf_items(trans, root, eb);
8018                         if (ret) {
8019                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8020                                         "%d accounting leaf items. Quota "
8021                                         "is out of sync, rescan required.\n",
8022                                         root->fs_info->sb->s_id, ret);
8023                         }
8024                 }
8025                 /* make block locked assertion in clean_tree_block happy */
8026                 if (!path->locks[level] &&
8027                     btrfs_header_generation(eb) == trans->transid) {
8028                         btrfs_tree_lock(eb);
8029                         btrfs_set_lock_blocking(eb);
8030                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8031                 }
8032                 clean_tree_block(trans, root->fs_info, eb);
8033         }
8034
8035         if (eb == root->node) {
8036                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8037                         parent = eb->start;
8038                 else
8039                         BUG_ON(root->root_key.objectid !=
8040                                btrfs_header_owner(eb));
8041         } else {
8042                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8043                         parent = path->nodes[level + 1]->start;
8044                 else
8045                         BUG_ON(root->root_key.objectid !=
8046                                btrfs_header_owner(path->nodes[level + 1]));
8047         }
8048
8049         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8050 out:
8051         wc->refs[level] = 0;
8052         wc->flags[level] = 0;
8053         return 0;
8054 }
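
/*
 * Editor's sketch (not part of the original file): the two walk stages
 * form a small state machine, based on the transitions visible in
 * do_walk_down() above and walk_up_proc():
 *
 *	DROP_REFERENCE --(shared block found, wc->update_ref set)-->
 *		UPDATE_BACKREF, with wc->shared_level = level - 1
 *	UPDATE_BACKREF --(walked back up to wc->shared_level)-->
 *		DROP_REFERENCE, with wc->update_progress advanced past
 *		the shared subtree via find_next_key()
 *
 * In DROP_REFERENCE, blocks whose reference count reaches zero are
 * freed; in UPDATE_BACKREF, only the backrefs below the shared block
 * are rewritten before dropping resumes.
 */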
8055
8056 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8057                                    struct btrfs_root *root,
8058                                    struct btrfs_path *path,
8059                                    struct walk_control *wc)
8060 {
8061         int level = wc->level;
8062         int lookup_info = 1;
8063         int ret;
8064
8065         while (level >= 0) {
8066                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8067                 if (ret > 0)
8068                         break;
8069
8070                 if (level == 0)
8071                         break;
8072
8073                 if (path->slots[level] >=
8074                     btrfs_header_nritems(path->nodes[level]))
8075                         break;
8076
8077                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8078                 if (ret > 0) {
8079                         path->slots[level]++;
8080                         continue;
8081                 } else if (ret < 0)
8082                         return ret;
8083                 level = wc->level;
8084         }
8085         return 0;
8086 }
8087
8088 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8089                                  struct btrfs_root *root,
8090                                  struct btrfs_path *path,
8091                                  struct walk_control *wc, int max_level)
8092 {
8093         int level = wc->level;
8094         int ret;
8095
8096         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8097         while (level < max_level && path->nodes[level]) {
8098                 wc->level = level;
8099                 if (path->slots[level] + 1 <
8100                     btrfs_header_nritems(path->nodes[level])) {
8101                         path->slots[level]++;
8102                         return 0;
8103                 } else {
8104                         ret = walk_up_proc(trans, root, path, wc);
8105                         if (ret > 0)
8106                                 return 0;
8107
8108                         if (path->locks[level]) {
8109                                 btrfs_tree_unlock_rw(path->nodes[level],
8110                                                      path->locks[level]);
8111                                 path->locks[level] = 0;
8112                         }
8113                         free_extent_buffer(path->nodes[level]);
8114                         path->nodes[level] = NULL;
8115                         level++;
8116                 }
8117         }
8118         return 1;
8119 }
8120
8121 /*
8122  * drop a subvolume tree.
8123  *
8124  * this function traverses the tree freeing any blocks that are only
8125  * referenced by the tree.
8126  *
8127  * when a shared tree block is found, this function decreases its
8128  * reference count by one. if update_ref is true, this function
8129  * also makes sure backrefs for the shared block and all lower level
8130  * blocks are properly updated.
8131  *
8132  * If called with for_reloc == 0, may exit early with -EAGAIN
8133  */
8134 int btrfs_drop_snapshot(struct btrfs_root *root,
8135                          struct btrfs_block_rsv *block_rsv, int update_ref,
8136                          int for_reloc)
8137 {
8138         struct btrfs_path *path;
8139         struct btrfs_trans_handle *trans;
8140         struct btrfs_root *tree_root = root->fs_info->tree_root;
8141         struct btrfs_root_item *root_item = &root->root_item;
8142         struct walk_control *wc;
8143         struct btrfs_key key;
8144         int err = 0;
8145         int ret;
8146         int level;
8147         bool root_dropped = false;
8148
8149         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8150
8151         path = btrfs_alloc_path();
8152         if (!path) {
8153                 err = -ENOMEM;
8154                 goto out;
8155         }
8156
8157         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8158         if (!wc) {
8159                 btrfs_free_path(path);
8160                 err = -ENOMEM;
8161                 goto out;
8162         }
8163
8164         trans = btrfs_start_transaction(tree_root, 0);
8165         if (IS_ERR(trans)) {
8166                 err = PTR_ERR(trans);
8167                 goto out_free;
8168         }
8169
8170         if (block_rsv)
8171                 trans->block_rsv = block_rsv;
8172
8173         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8174                 level = btrfs_header_level(root->node);
8175                 path->nodes[level] = btrfs_lock_root_node(root);
8176                 btrfs_set_lock_blocking(path->nodes[level]);
8177                 path->slots[level] = 0;
8178                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8179                 memset(&wc->update_progress, 0,
8180                        sizeof(wc->update_progress));
8181         } else {
8182                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8183                 memcpy(&wc->update_progress, &key,
8184                        sizeof(wc->update_progress));
8185
8186                 level = root_item->drop_level;
8187                 BUG_ON(level == 0);
8188                 path->lowest_level = level;
8189                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8190                 path->lowest_level = 0;
8191                 if (ret < 0) {
8192                         err = ret;
8193                         goto out_end_trans;
8194                 }
8195                 WARN_ON(ret > 0);
8196
8197                 /*
8198                  * unlock our path; this is safe because only this
8199                  * function is allowed to delete this snapshot
8200                  */
8201                 btrfs_unlock_up_safe(path, 0);
8202
8203                 level = btrfs_header_level(root->node);
8204                 while (1) {
8205                         btrfs_tree_lock(path->nodes[level]);
8206                         btrfs_set_lock_blocking(path->nodes[level]);
8207                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8208
8209                         ret = btrfs_lookup_extent_info(trans, root,
8210                                                 path->nodes[level]->start,
8211                                                 level, 1, &wc->refs[level],
8212                                                 &wc->flags[level]);
8213                         if (ret < 0) {
8214                                 err = ret;
8215                                 goto out_end_trans;
8216                         }
8217                         BUG_ON(wc->refs[level] == 0);
8218
8219                         if (level == root_item->drop_level)
8220                                 break;
8221
8222                         btrfs_tree_unlock(path->nodes[level]);
8223                         path->locks[level] = 0;
8224                         WARN_ON(wc->refs[level] != 1);
8225                         level--;
8226                 }
8227         }
8228
8229         wc->level = level;
8230         wc->shared_level = -1;
8231         wc->stage = DROP_REFERENCE;
8232         wc->update_ref = update_ref;
8233         wc->keep_locks = 0;
8234         wc->for_reloc = for_reloc;
8235         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8236
8237         while (1) {
8238
8239                 ret = walk_down_tree(trans, root, path, wc);
8240                 if (ret < 0) {
8241                         err = ret;
8242                         break;
8243                 }
8244
8245                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8246                 if (ret < 0) {
8247                         err = ret;
8248                         break;
8249                 }
8250
8251                 if (ret > 0) {
8252                         BUG_ON(wc->stage != DROP_REFERENCE);
8253                         break;
8254                 }
8255
8256                 if (wc->stage == DROP_REFERENCE) {
8257                         level = wc->level;
8258                         btrfs_node_key(path->nodes[level],
8259                                        &root_item->drop_progress,
8260                                        path->slots[level]);
8261                         root_item->drop_level = level;
8262                 }
8263
8264                 BUG_ON(wc->level == 0);
8265                 if (btrfs_should_end_transaction(trans, tree_root) ||
8266                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8267                         ret = btrfs_update_root(trans, tree_root,
8268                                                 &root->root_key,
8269                                                 root_item);
8270                         if (ret) {
8271                                 btrfs_abort_transaction(trans, tree_root, ret);
8272                                 err = ret;
8273                                 goto out_end_trans;
8274                         }
8275
8276                         /*
8277                          * Qgroup update accounting is run from
8278                          * delayed ref handling. This usually works
8279                          * out because delayed refs are normally the
8280                          * only way qgroup updates are added. However,
8281                          * we may have added updates during our tree
8282                          * walk so run qgroups here to make sure we
8283                          * don't lose any updates.
8284                          */
8285                         ret = btrfs_delayed_qgroup_accounting(trans,
8286                                                               root->fs_info);
8287                         if (ret)
8288                                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8289                                                    "running qgroup updates "
8290                                                    "during snapshot delete. "
8291                                                    "Quota is out of sync, "
8292                                                    "rescan required.\n", ret);
8293
8294                         btrfs_end_transaction_throttle(trans, tree_root);
8295                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8296                                 pr_debug("BTRFS: drop snapshot early exit\n");
8297                                 err = -EAGAIN;
8298                                 goto out_free;
8299                         }
8300
8301                         trans = btrfs_start_transaction(tree_root, 0);
8302                         if (IS_ERR(trans)) {
8303                                 err = PTR_ERR(trans);
8304                                 goto out_free;
8305                         }
8306                         if (block_rsv)
8307                                 trans->block_rsv = block_rsv;
8308                 }
8309         }
8310         btrfs_release_path(path);
8311         if (err)
8312                 goto out_end_trans;
8313
8314         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8315         if (ret) {
8316                 btrfs_abort_transaction(trans, tree_root, ret);
8317                 goto out_end_trans;
8318         }
8319
8320         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8321                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8322                                       NULL, NULL);
8323                 if (ret < 0) {
8324                         btrfs_abort_transaction(trans, tree_root, ret);
8325                         err = ret;
8326                         goto out_end_trans;
8327                 } else if (ret > 0) {
8328                         /* if we fail to delete the orphan item this time
8329                          * around, it'll get picked up the next time.
8330                          *
8331                          * The most common failure here is just -ENOENT.
8332                          */
8333                         btrfs_del_orphan_item(trans, tree_root,
8334                                               root->root_key.objectid);
8335                 }
8336         }
8337
8338         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8339                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8340         } else {
8341                 free_extent_buffer(root->node);
8342                 free_extent_buffer(root->commit_root);
8343                 btrfs_put_fs_root(root);
8344         }
8345         root_dropped = true;
8346 out_end_trans:
8347         ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8348         if (ret)
8349                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8350                                    "running qgroup updates "
8351                                    "during snapshot delete. "
8352                                    "Quota is out of sync, "
8353                                    "rescan required.\n", ret);
8354
8355         btrfs_end_transaction_throttle(trans, tree_root);
8356 out_free:
8357         kfree(wc);
8358         btrfs_free_path(path);
8359 out:
8360         /*
8361          * So if we need to stop dropping the snapshot for whatever reason, we
8362          * need to make sure to add it back to the dead root list so that we
8363          * keep trying to do the work later.  This also cleans up roots we
8364          * don't have in the radix (like when we recover after a power fail
8365          * or unmount) so we don't leak memory.
8366          */
8367         if (!for_reloc && root_dropped == false)
8368                 btrfs_add_dead_root(root);
8369         if (err && err != -EAGAIN)
8370                 btrfs_std_error(root->fs_info, err);
8371         return err;
8372 }
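
/*
 * Editor's note (not part of the original file): a sketch of how the
 * resume mechanism above fits together.  While dropping, the loop
 * periodically persists its position:
 *
 *	btrfs_node_key(path->nodes[level], &root_item->drop_progress,
 *		       path->slots[level]);
 *	root_item->drop_level = level;
 *	btrfs_update_root(trans, tree_root, &root->root_key, root_item);
 *
 * If the drop is interrupted (crash, unmount, or the -EAGAIN early
 * exit), the next call finds drop_progress non-zero, re-searches to
 * that key with path->lowest_level = drop_level, re-locks the nodes
 * above it and continues the walk from that exact slot instead of the
 * tree root.
 */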
8373
8374 /*
8375  * drop subtree rooted at tree block 'node'.
8376  *
8377  * NOTE: this function will unlock and release tree block 'node'
8378  * only used by relocation code
8379  */
8380 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8381                         struct btrfs_root *root,
8382                         struct extent_buffer *node,
8383                         struct extent_buffer *parent)
8384 {
8385         struct btrfs_path *path;
8386         struct walk_control *wc;
8387         int level;
8388         int parent_level;
8389         int ret = 0;
8390         int wret;
8391
8392         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8393
8394         path = btrfs_alloc_path();
8395         if (!path)
8396                 return -ENOMEM;
8397
8398         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8399         if (!wc) {
8400                 btrfs_free_path(path);
8401                 return -ENOMEM;
8402         }
8403
8404         btrfs_assert_tree_locked(parent);
8405         parent_level = btrfs_header_level(parent);
8406         extent_buffer_get(parent);
8407         path->nodes[parent_level] = parent;
8408         path->slots[parent_level] = btrfs_header_nritems(parent);
8409
8410         btrfs_assert_tree_locked(node);
8411         level = btrfs_header_level(node);
8412         path->nodes[level] = node;
8413         path->slots[level] = 0;
8414         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8415
8416         wc->refs[parent_level] = 1;
8417         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8418         wc->level = level;
8419         wc->shared_level = -1;
8420         wc->stage = DROP_REFERENCE;
8421         wc->update_ref = 0;
8422         wc->keep_locks = 1;
8423         wc->for_reloc = 1;
8424         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8425
8426         while (1) {
8427                 wret = walk_down_tree(trans, root, path, wc);
8428                 if (wret < 0) {
8429                         ret = wret;
8430                         break;
8431                 }
8432
8433                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8434                 if (wret < 0)
8435                         ret = wret;
8436                 if (wret != 0)
8437                         break;
8438         }
8439
8440         kfree(wc);
8441         btrfs_free_path(path);
8442         return ret;
8443 }
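
/*
 * Editor's note (derived from the setup above, not part of the original
 * file): unlike btrfs_drop_snapshot(), this variant seeds the
 * walk_control by hand.  The parent is treated as shared and fully
 * backref'd (wc->refs[parent_level] = 1, FULL_BACKREF), keep_locks is
 * set because relocation still needs the path locked, and
 * walk_up_tree() is capped at parent_level so the walk never climbs
 * past the caller-supplied parent.
 */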
8444
8445 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8446 {
8447         u64 num_devices;
8448         u64 stripped;
8449
8450         /*
8451          * if restripe for this chunk_type is on, pick the target profile
8452          * and return; otherwise do the usual balance
8453          */
8454         stripped = get_restripe_target(root->fs_info, flags);
8455         if (stripped)
8456                 return extended_to_chunk(stripped);
8457
8458         num_devices = root->fs_info->fs_devices->rw_devices;
8459
8460         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8461                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8462                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8463
8464         if (num_devices == 1) {
8465                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8466                 stripped = flags & ~stripped;
8467
8468                 /* turn raid0 into single device chunks */
8469                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8470                         return stripped;
8471
8472                 /* turn mirroring into duplication */
8473                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8474                              BTRFS_BLOCK_GROUP_RAID10))
8475                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8476         } else {
8477                 /* they already had raid on here, just return */
8478                 if (flags & stripped)
8479                         return flags;
8480
8481                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8482                 stripped = flags & ~stripped;
8483
8484                 /* switch duplicated blocks with raid1 */
8485                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8486                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8487
8488                 /* this is drive concat, leave it alone */
8489         }
8490
8491         return flags;
8492 }
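
/*
 * Editor's summary of the conversions implemented above (derived from
 * the code, not part of the original file):
 *
 *	rw_devices == 1:  RAID0           -> single
 *	                  RAID1/RAID10    -> DUP
 *	rw_devices  > 1:  DUP             -> RAID1
 *	                  existing RAID   -> unchanged
 *	                  single (concat) -> unchanged
 *
 * e.g. balancing after shrinking a two-device RAID1 filesystem to one
 * device would rewrite its chunks as DUP on the remaining device.
 */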
8493
8494 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8495 {
8496         struct btrfs_space_info *sinfo = cache->space_info;
8497         u64 num_bytes;
8498         u64 min_allocable_bytes;
8499         int ret = -ENOSPC;
8500
8502         /*
8503          * We need some metadata space and system metadata space for
8504          * allocating chunks in some corner cases, so require a minimum
8505          * of allocatable space unless we are forced to set it readonly.
8506          */
8507         if ((sinfo->flags &
8508              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8509             !force)
8510                 min_allocable_bytes = 1 * 1024 * 1024;
8511         else
8512                 min_allocable_bytes = 0;
8513
8514         spin_lock(&sinfo->lock);
8515         spin_lock(&cache->lock);
8516
8517         if (cache->ro) {
8518                 ret = 0;
8519                 goto out;
8520         }
8521
8522         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8523                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8524
8525         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8526             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8527             min_allocable_bytes <= sinfo->total_bytes) {
8528                 sinfo->bytes_readonly += num_bytes;
8529                 cache->ro = 1;
8530                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8531                 ret = 0;
8532         }
8533 out:
8534         spin_unlock(&cache->lock);
8535         spin_unlock(&sinfo->lock);
8536         return ret;
8537 }
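
/*
 * Editor's worked example for the admission check above (hypothetical
 * numbers, not part of the original file).  A block group may go
 * read-only only when
 *
 *	bytes_used + bytes_reserved + bytes_pinned + bytes_may_use +
 *	bytes_readonly + num_bytes + min_allocable_bytes
 *		<= total_bytes
 *
 * where num_bytes is the unused part of this group.  With a 10GiB
 * metadata space_info of which 6GiB is accounted in the terms above, a
 * group with 512MiB unused and the 1MiB floor passes (6GiB + 512MiB +
 * 1MiB <= 10GiB), so the group is marked ro and its unused 512MiB is
 * added to bytes_readonly.
 */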
8538
8539 int btrfs_set_block_group_ro(struct btrfs_root *root,
8540                              struct btrfs_block_group_cache *cache)
8542 {
8543         struct btrfs_trans_handle *trans;
8544         u64 alloc_flags;
8545         int ret;
8546
8547         BUG_ON(cache->ro);
8548
8549         trans = btrfs_join_transaction(root);
8550         if (IS_ERR(trans))
8551                 return PTR_ERR(trans);
8552
8553         ret = set_block_group_ro(cache, 0);
8554         if (!ret)
8555                 goto out;
8556         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8557         ret = do_chunk_alloc(trans, root, alloc_flags,
8558                              CHUNK_ALLOC_FORCE);
8559         if (ret < 0)
8560                 goto out;
8561         ret = set_block_group_ro(cache, 0);
8562 out:
8563         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8564                 alloc_flags = update_block_group_flags(root, cache->flags);
8565                 check_system_chunk(trans, root, alloc_flags);
8566         }
8567
8568         btrfs_end_transaction(trans, root);
8569         return ret;
8570 }
8571
8572 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8573                             struct btrfs_root *root, u64 type)
8574 {
8575         u64 alloc_flags = get_alloc_profile(root, type);
8576         return do_chunk_alloc(trans, root, alloc_flags,
8577                               CHUNK_ALLOC_FORCE);
8578 }
8579
8580 /*
8581  * helper to account the unused space of all the readonly block groups in
8582  * the space_info. takes mirrors into account.
8583  */
8584 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8585 {
8586         struct btrfs_block_group_cache *block_group;
8587         u64 free_bytes = 0;
8588         int factor;
8589
8590         /* It's df, we don't care if it's racy */
8591         if (list_empty(&sinfo->ro_bgs))
8592                 return 0;
8593
8594         spin_lock(&sinfo->lock);
8595         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8596                 spin_lock(&block_group->lock);
8597
8598                 if (!block_group->ro) {
8599                         spin_unlock(&block_group->lock);
8600                         continue;
8601                 }
8602
8603                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8604                                           BTRFS_BLOCK_GROUP_RAID10 |
8605                                           BTRFS_BLOCK_GROUP_DUP))
8606                         factor = 2;
8607                 else
8608                         factor = 1;
8609
8610                 free_bytes += (block_group->key.offset -
8611                                btrfs_block_group_used(&block_group->item)) *
8612                                factor;
8613
8614                 spin_unlock(&block_group->lock);
8615         }
8616         spin_unlock(&sinfo->lock);
8617
8618         return free_bytes;
8619 }
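
/*
 * Editor's example for the mirror factor above (hypothetical numbers,
 * not part of the original file): a read-only RAID1 block group with
 * key.offset == 1GiB and 300MiB used contributes
 * (1024MiB - 300MiB) * 2 = 1448MiB of raw free bytes, since each
 * logical byte occupies two mirrored copies; RAID0/single groups count
 * with factor 1.
 */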
8620
8621 void btrfs_set_block_group_rw(struct btrfs_root *root,
8622                               struct btrfs_block_group_cache *cache)
8623 {
8624         struct btrfs_space_info *sinfo = cache->space_info;
8625         u64 num_bytes;
8626
8627         BUG_ON(!cache->ro);
8628
8629         spin_lock(&sinfo->lock);
8630         spin_lock(&cache->lock);
8631         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8632                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8633         sinfo->bytes_readonly -= num_bytes;
8634         cache->ro = 0;
8635         list_del_init(&cache->ro_list);
8636         spin_unlock(&cache->lock);
8637         spin_unlock(&sinfo->lock);
8638 }
8639
8640 /*
8641  * checks to see if it's even possible to relocate this block group.
8642  *
8643  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8644  * it's ok to go ahead and try.
8645  */
8646 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8647 {
8648         struct btrfs_block_group_cache *block_group;
8649         struct btrfs_space_info *space_info;
8650         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8651         struct btrfs_device *device;
8652         struct btrfs_trans_handle *trans;
8653         u64 min_free;
8654         u64 dev_min = 1;
8655         u64 dev_nr = 0;
8656         u64 target;
8657         int index;
8658         int full = 0;
8659         int ret = 0;
8660
8661         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8662
8663         /* odd, couldn't find the block group, leave it alone */
8664         if (!block_group)
8665                 return -1;
8666
8667         min_free = btrfs_block_group_used(&block_group->item);
8668
8669         /* no bytes used, we're good */
8670         if (!min_free)
8671                 goto out;
8672
8673         space_info = block_group->space_info;
8674         spin_lock(&space_info->lock);
8675
8676         full = space_info->full;
8677
8678         /*
8679          * if this is the last block group we have in this space, we can't
8680          * relocate it unless we're able to allocate a new chunk below.
8681          *
8682          * Otherwise, we need to make sure we have room in the space to handle
8683          * all of the extents from this block group.  If we can, we're good.
8684          */
8685         if ((space_info->total_bytes != block_group->key.offset) &&
8686             (space_info->bytes_used + space_info->bytes_reserved +
8687              space_info->bytes_pinned + space_info->bytes_readonly +
8688              min_free < space_info->total_bytes)) {
8689                 spin_unlock(&space_info->lock);
8690                 goto out;
8691         }
8692         spin_unlock(&space_info->lock);
8693
8694         /*
8695          * ok we don't have enough space, but maybe we have free space on our
8696          * devices to allocate new chunks for relocation, so loop through our
8697          * alloc devices and guess if we have enough space.  if this block
8698          * group is going to be restriped, run checks against the target
8699          * profile instead of the current one.
8700          */
8701         ret = -1;
8702
8703         /*
8704          * index:
8705          *      0: raid10
8706          *      1: raid1
8707          *      2: dup
8708          *      3: raid0
8709          *      4: single
8710          */
8711         target = get_restripe_target(root->fs_info, block_group->flags);
8712         if (target) {
8713                 index = __get_raid_index(extended_to_chunk(target));
8714         } else {
8715                 /*
8716                  * this is just a balance, so if we were marked as full
8717                  * we know there is no space for a new chunk
8718                  */
8719                 if (full)
8720                         goto out;
8721
8722                 index = get_block_group_index(block_group);
8723         }
8724
8725         if (index == BTRFS_RAID_RAID10) {
8726                 dev_min = 4;
8727                 /* Divide by 2 */
8728                 min_free >>= 1;
8729         } else if (index == BTRFS_RAID_RAID1) {
8730                 dev_min = 2;
8731         } else if (index == BTRFS_RAID_DUP) {
8732                 /* Multiply by 2 */
8733                 min_free <<= 1;
8734         } else if (index == BTRFS_RAID_RAID0) {
8735                 dev_min = fs_devices->rw_devices;
8736                 min_free = div64_u64(min_free, dev_min);
8737         }
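
        /*
         * Editor's worked example for the scaling above (hypothetical
         * numbers, not part of the original file): relocating a RAID10
         * group with 1GiB used requires dev_min = 4 devices with
         * min_free = 512MiB free on each, i.e. 2GiB of raw space for
         * 1GiB of data.  The same 1GiB in a DUP group needs one device
         * with min_free = 2GiB, since both copies land on that device.
         */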
8738
8739         /* We need to do this so that we can look at pending chunks */
8740         trans = btrfs_join_transaction(root);
8741         if (IS_ERR(trans)) {
8742                 ret = PTR_ERR(trans);
8743                 goto out;
8744         }
8745
8746         mutex_lock(&root->fs_info->chunk_mutex);
8747         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8748                 u64 dev_offset;
8749
8750                 /*
8751                  * check to make sure we can actually find a chunk with enough
8752                  * space to fit our block group in.
8753                  */
8754                 if (device->total_bytes > device->bytes_used + min_free &&
8755                     !device->is_tgtdev_for_dev_replace) {
8756                         ret = find_free_dev_extent(trans, device, min_free,
8757                                                    &dev_offset, NULL);
8758                         if (!ret)
8759                                 dev_nr++;
8760
8761                         if (dev_nr >= dev_min)
8762                                 break;
8763
8764                         ret = -1;
8765                 }
8766         }
8767         mutex_unlock(&root->fs_info->chunk_mutex);
8768         btrfs_end_transaction(trans, root);
8769 out:
8770         btrfs_put_block_group(block_group);
8771         return ret;
8772 }
8773
8774 static int find_first_block_group(struct btrfs_root *root,
8775                 struct btrfs_path *path, struct btrfs_key *key)
8776 {
8777         int ret = 0;
8778         struct btrfs_key found_key;
8779         struct extent_buffer *leaf;
8780         int slot;
8781
8782         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8783         if (ret < 0)
8784                 goto out;
8785
8786         while (1) {
8787                 slot = path->slots[0];
8788                 leaf = path->nodes[0];
8789                 if (slot >= btrfs_header_nritems(leaf)) {
8790                         ret = btrfs_next_leaf(root, path);
8791                         if (ret == 0)
8792                                 continue;
8793                         if (ret < 0)
8794                                 goto out;
8795                         break;
8796                 }
8797                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8798
8799                 if (found_key.objectid >= key->objectid &&
8800                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8801                         ret = 0;
8802                         goto out;
8803                 }
8804                 path->slots[0]++;
8805         }
8806 out:
8807         return ret;
8808 }
8809
8810 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8811 {
8812         struct btrfs_block_group_cache *block_group;
8813         u64 last = 0;
8814
8815         while (1) {
8816                 struct inode *inode;
8817
8818                 block_group = btrfs_lookup_first_block_group(info, last);
8819                 while (block_group) {
8820                         spin_lock(&block_group->lock);
8821                         if (block_group->iref)
8822                                 break;
8823                         spin_unlock(&block_group->lock);
8824                         block_group = next_block_group(info->tree_root,
8825                                                        block_group);
8826                 }
8827                 if (!block_group) {
8828                         if (last == 0)
8829                                 break;
8830                         last = 0;
8831                         continue;
8832                 }
8833
8834                 inode = block_group->inode;
8835                 block_group->iref = 0;
8836                 block_group->inode = NULL;
8837                 spin_unlock(&block_group->lock);
8838                 iput(inode);
8839                 last = block_group->key.objectid + block_group->key.offset;
8840                 btrfs_put_block_group(block_group);
8841         }
8842 }
8843
8844 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8845 {
8846         struct btrfs_block_group_cache *block_group;
8847         struct btrfs_space_info *space_info;
8848         struct btrfs_caching_control *caching_ctl;
8849         struct rb_node *n;
8850
8851         down_write(&info->commit_root_sem);
8852         while (!list_empty(&info->caching_block_groups)) {
8853                 caching_ctl = list_entry(info->caching_block_groups.next,
8854                                          struct btrfs_caching_control, list);
8855                 list_del(&caching_ctl->list);
8856                 put_caching_control(caching_ctl);
8857         }
8858         up_write(&info->commit_root_sem);
8859
8860         spin_lock(&info->unused_bgs_lock);
8861         while (!list_empty(&info->unused_bgs)) {
8862                 block_group = list_first_entry(&info->unused_bgs,
8863                                                struct btrfs_block_group_cache,
8864                                                bg_list);
8865                 list_del_init(&block_group->bg_list);
8866                 btrfs_put_block_group(block_group);
8867         }
8868         spin_unlock(&info->unused_bgs_lock);
8869
8870         spin_lock(&info->block_group_cache_lock);
8871         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8872                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8873                                        cache_node);
8874                 rb_erase(&block_group->cache_node,
8875                          &info->block_group_cache_tree);
8876                 RB_CLEAR_NODE(&block_group->cache_node);
8877                 spin_unlock(&info->block_group_cache_lock);
8878
8879                 down_write(&block_group->space_info->groups_sem);
8880                 list_del(&block_group->list);
8881                 up_write(&block_group->space_info->groups_sem);
8882
8883                 if (block_group->cached == BTRFS_CACHE_STARTED)
8884                         wait_block_group_cache_done(block_group);
8885
8886                 /*
8887                  * We haven't cached this block group, which means we could
8888                  * possibly have excluded extents on this block group.
8889                  */
8890                 if (block_group->cached == BTRFS_CACHE_NO ||
8891                     block_group->cached == BTRFS_CACHE_ERROR)
8892                         free_excluded_extents(info->extent_root, block_group);
8893
8894                 btrfs_remove_free_space_cache(block_group);
8895                 btrfs_put_block_group(block_group);
8896
8897                 spin_lock(&info->block_group_cache_lock);
8898         }
8899         spin_unlock(&info->block_group_cache_lock);
8900
8901         /* now that all the block groups are freed, go through and
8902          * free all the space_info structs.  This is only called during
8903          * the final stages of unmount, and so we know nobody is
8904          * using them.  We call synchronize_rcu() once before we start,
8905          * just to be on the safe side.
8906          */
8907         synchronize_rcu();
8908
8909         release_global_block_rsv(info);
8910
8911         while (!list_empty(&info->space_info)) {
8912                 int i;
8913
8914                 space_info = list_entry(info->space_info.next,
8915                                         struct btrfs_space_info,
8916                                         list);
8917                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8918                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8919                             space_info->bytes_reserved > 0 ||
8920                             space_info->bytes_may_use > 0)) {
8921                                 dump_space_info(space_info, 0, 0);
8922                         }
8923                 }
8924                 list_del(&space_info->list);
8925                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8926                         struct kobject *kobj;
8927                         kobj = space_info->block_group_kobjs[i];
8928                         space_info->block_group_kobjs[i] = NULL;
8929                         if (kobj) {
8930                                 kobject_del(kobj);
8931                                 kobject_put(kobj);
8932                         }
8933                 }
8934                 kobject_del(&space_info->kobj);
8935                 kobject_put(&space_info->kobj);
8936         }
8937         return 0;
8938 }
8939
8940 static void __link_block_group(struct btrfs_space_info *space_info,
8941                                struct btrfs_block_group_cache *cache)
8942 {
8943         int index = get_block_group_index(cache);
8944         bool first = false;
8945
8946         down_write(&space_info->groups_sem);
8947         if (list_empty(&space_info->block_groups[index]))
8948                 first = true;
8949         list_add_tail(&cache->list, &space_info->block_groups[index]);
8950         up_write(&space_info->groups_sem);
8951
8952         if (first) {
8953                 struct raid_kobject *rkobj;
8954                 int ret;
8955
8956                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8957                 if (!rkobj)
8958                         goto out_err;
8959                 rkobj->raid_type = index;
8960                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8961                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8962                                   "%s", get_raid_name(index));
8963                 if (ret) {
8964                         kobject_put(&rkobj->kobj);
8965                         goto out_err;
8966                 }
8967                 space_info->block_group_kobjs[index] = &rkobj->kobj;
8968         }
8969
8970         return;
8971 out_err:
8972         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8973 }
8974
8975 static struct btrfs_block_group_cache *
8976 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8977 {
8978         struct btrfs_block_group_cache *cache;
8979
8980         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8981         if (!cache)
8982                 return NULL;
8983
8984         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8985                                         GFP_NOFS);
8986         if (!cache->free_space_ctl) {
8987                 kfree(cache);
8988                 return NULL;
8989         }
8990
8991         cache->key.objectid = start;
8992         cache->key.offset = size;
8993         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8994
8995         cache->sectorsize = root->sectorsize;
8996         cache->fs_info = root->fs_info;
8997         cache->full_stripe_len = btrfs_full_stripe_len(root,
8998                                                &root->fs_info->mapping_tree,
8999                                                start);
9000         atomic_set(&cache->count, 1);
9001         spin_lock_init(&cache->lock);
9002         init_rwsem(&cache->data_rwsem);
9003         INIT_LIST_HEAD(&cache->list);
9004         INIT_LIST_HEAD(&cache->cluster_list);
9005         INIT_LIST_HEAD(&cache->bg_list);
9006         INIT_LIST_HEAD(&cache->ro_list);
9007         INIT_LIST_HEAD(&cache->dirty_list);
9008         btrfs_init_free_space_ctl(cache);
9009         atomic_set(&cache->trimming, 0);
9010
9011         return cache;
9012 }
9013
9014 int btrfs_read_block_groups(struct btrfs_root *root)
9015 {
9016         struct btrfs_path *path;
9017         int ret;
9018         struct btrfs_block_group_cache *cache;
9019         struct btrfs_fs_info *info = root->fs_info;
9020         struct btrfs_space_info *space_info;
9021         struct btrfs_key key;
9022         struct btrfs_key found_key;
9023         struct extent_buffer *leaf;
9024         int need_clear = 0;
9025         u64 cache_gen;
9026
9027         root = info->extent_root;
9028         key.objectid = 0;
9029         key.offset = 0;
9030         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9031         path = btrfs_alloc_path();
9032         if (!path)
9033                 return -ENOMEM;
9034         path->reada = 1;
9035
9036         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9037         if (btrfs_test_opt(root, SPACE_CACHE) &&
9038             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9039                 need_clear = 1;
9040         if (btrfs_test_opt(root, CLEAR_CACHE))
9041                 need_clear = 1;
9042
9043         while (1) {
9044                 ret = find_first_block_group(root, path, &key);
9045                 if (ret > 0)
9046                         break;
9047                 if (ret != 0)
9048                         goto error;
9049
9050                 leaf = path->nodes[0];
9051                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9052
9053                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9054                                                        found_key.offset);
9055                 if (!cache) {
9056                         ret = -ENOMEM;
9057                         goto error;
9058                 }
9059
9060                 if (need_clear) {
9061                         /*
9062                          * When we mount with an old space cache, we need to
9063                          * set BTRFS_DC_CLEAR and set the dirty flag.
9064                          *
9065                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9066                          *    truncate the old free space cache inode and
9067                          *    set up a new one.
9068                          * b) Setting 'dirty flag' makes sure that we flush
9069                          *    the new space cache info onto disk.
9070                          */
9071                         if (btrfs_test_opt(root, SPACE_CACHE))
9072                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9073                 }
9074
9075                 read_extent_buffer(leaf, &cache->item,
9076                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9077                                    sizeof(cache->item));
9078                 cache->flags = btrfs_block_group_flags(&cache->item);
9079
9080                 key.objectid = found_key.objectid + found_key.offset;
9081                 btrfs_release_path(path);
9082
9083                 /*
9084                  * We need to exclude the super stripes now so that the space
9085                  * info has super bytes accounted for, otherwise we'll think
9086                  * we have more space than we actually do.
9087                  */
9088                 ret = exclude_super_stripes(root, cache);
9089                 if (ret) {
9090                         /*
9091                          * We may have excluded something, so call this just in
9092                          * case.
9093                          */
9094                         free_excluded_extents(root, cache);
9095                         btrfs_put_block_group(cache);
9096                         goto error;
9097                 }
9098
9099                 /*
9100                  * check for two cases, either we are full, and therefore
9101                  * don't need to bother with the caching work since we won't
9102                  * find any space, or we are empty, and we can just add all
9103                  * the space in and be done with it.  This saves us a lot of
9104                  * time, particularly in the full case.
9105                  */
9106                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9107                         cache->last_byte_to_unpin = (u64)-1;
9108                         cache->cached = BTRFS_CACHE_FINISHED;
9109                         free_excluded_extents(root, cache);
9110                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9111                         cache->last_byte_to_unpin = (u64)-1;
9112                         cache->cached = BTRFS_CACHE_FINISHED;
9113                         add_new_free_space(cache, root->fs_info,
9114                                            found_key.objectid,
9115                                            found_key.objectid +
9116                                            found_key.offset);
9117                         free_excluded_extents(root, cache);
9118                 }
9119
9120                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9121                 if (ret) {
9122                         btrfs_remove_free_space_cache(cache);
9123                         btrfs_put_block_group(cache);
9124                         goto error;
9125                 }
9126
9127                 ret = update_space_info(info, cache->flags, found_key.offset,
9128                                         btrfs_block_group_used(&cache->item),
9129                                         &space_info);
9130                 if (ret) {
9131                         btrfs_remove_free_space_cache(cache);
9132                         spin_lock(&info->block_group_cache_lock);
9133                         rb_erase(&cache->cache_node,
9134                                  &info->block_group_cache_tree);
9135                         RB_CLEAR_NODE(&cache->cache_node);
9136                         spin_unlock(&info->block_group_cache_lock);
9137                         btrfs_put_block_group(cache);
9138                         goto error;
9139                 }
9140
9141                 cache->space_info = space_info;
9142                 spin_lock(&cache->space_info->lock);
9143                 cache->space_info->bytes_readonly += cache->bytes_super;
9144                 spin_unlock(&cache->space_info->lock);
9145
9146                 __link_block_group(space_info, cache);
9147
9148                 set_avail_alloc_bits(root->fs_info, cache->flags);
9149                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9150                         set_block_group_ro(cache, 1);
9151                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9152                         spin_lock(&info->unused_bgs_lock);
9153                         /* Should always be true but just in case. */
9154                         if (list_empty(&cache->bg_list)) {
9155                                 btrfs_get_block_group(cache);
9156                                 list_add_tail(&cache->bg_list,
9157                                               &info->unused_bgs);
9158                         }
9159                         spin_unlock(&info->unused_bgs_lock);
9160                 }
9161         }
9162
9163         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9164                 if (!(get_alloc_profile(root, space_info->flags) &
9165                       (BTRFS_BLOCK_GROUP_RAID10 |
9166                        BTRFS_BLOCK_GROUP_RAID1 |
9167                        BTRFS_BLOCK_GROUP_RAID5 |
9168                        BTRFS_BLOCK_GROUP_RAID6 |
9169                        BTRFS_BLOCK_GROUP_DUP)))
9170                         continue;
9171                 /*
9172                  * avoid allocating from un-mirrored block groups if there are
9173                  * mirrored block groups.
9174                  */
9175                 list_for_each_entry(cache,
9176                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9177                                 list)
9178                         set_block_group_ro(cache, 1);
9179                 list_for_each_entry(cache,
9180                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9181                                 list)
9182                         set_block_group_ro(cache, 1);
9183         }
9184
9185         init_global_block_rsv(info);
9186         ret = 0;
9187 error:
9188         btrfs_free_path(path);
9189         return ret;
9190 }
9191
9192 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9193                                        struct btrfs_root *root)
9194 {
9195         struct btrfs_block_group_cache *block_group, *tmp;
9196         struct btrfs_root *extent_root = root->fs_info->extent_root;
9197         struct btrfs_block_group_item item;
9198         struct btrfs_key key;
9199         int ret = 0;
9200
9201         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9202                 if (ret)
9203                         goto next;
9204
9205                 spin_lock(&block_group->lock);
9206                 memcpy(&item, &block_group->item, sizeof(item));
9207                 memcpy(&key, &block_group->key, sizeof(key));
9208                 spin_unlock(&block_group->lock);
9209
9210                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9211                                         sizeof(item));
9212                 if (ret)
9213                         btrfs_abort_transaction(trans, extent_root, ret);
9214                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9215                                                key.objectid, key.offset);
9216                 if (ret)
9217                         btrfs_abort_transaction(trans, extent_root, ret);
9218 next:
9219                 list_del_init(&block_group->bg_list);
9220         }
9221 }
9222
9223 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9224                            struct btrfs_root *root, u64 bytes_used,
9225                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9226                            u64 size)
9227 {
9228         int ret;
9229         struct btrfs_root *extent_root;
9230         struct btrfs_block_group_cache *cache;
9231
9232         extent_root = root->fs_info->extent_root;
9233
9234         btrfs_set_log_full_commit(root->fs_info, trans);
9235
9236         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9237         if (!cache)
9238                 return -ENOMEM;
9239
9240         btrfs_set_block_group_used(&cache->item, bytes_used);
9241         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9242         btrfs_set_block_group_flags(&cache->item, type);
9243
9244         cache->flags = type;
9245         cache->last_byte_to_unpin = (u64)-1;
9246         cache->cached = BTRFS_CACHE_FINISHED;
9247         ret = exclude_super_stripes(root, cache);
9248         if (ret) {
9249                 /*
9250                  * We may have excluded something, so call this just in
9251                  * case.
9252                  */
9253                 free_excluded_extents(root, cache);
9254                 btrfs_put_block_group(cache);
9255                 return ret;
9256         }
9257
9258         add_new_free_space(cache, root->fs_info, chunk_offset,
9259                            chunk_offset + size);
9260
9261         free_excluded_extents(root, cache);
9262
9263         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9264         if (ret) {
9265                 btrfs_remove_free_space_cache(cache);
9266                 btrfs_put_block_group(cache);
9267                 return ret;
9268         }
9269
9270         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9271                                 &cache->space_info);
9272         if (ret) {
9273                 btrfs_remove_free_space_cache(cache);
9274                 spin_lock(&root->fs_info->block_group_cache_lock);
9275                 rb_erase(&cache->cache_node,
9276                          &root->fs_info->block_group_cache_tree);
9277                 RB_CLEAR_NODE(&cache->cache_node);
9278                 spin_unlock(&root->fs_info->block_group_cache_lock);
9279                 btrfs_put_block_group(cache);
9280                 return ret;
9281         }
9282         update_global_block_rsv(root->fs_info);
9283
9284         spin_lock(&cache->space_info->lock);
9285         cache->space_info->bytes_readonly += cache->bytes_super;
9286         spin_unlock(&cache->space_info->lock);
9287
9288         __link_block_group(cache->space_info, cache);
9289
9290         list_add_tail(&cache->bg_list, &trans->new_bgs);
9291
9292         set_avail_alloc_bits(extent_root->fs_info, type);
9293
9294         return 0;
9295 }
9296
9297 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9298 {
9299         u64 extra_flags = chunk_to_extended(flags) &
9300                                 BTRFS_EXTENDED_PROFILE_MASK;
9301
9302         write_seqlock(&fs_info->profiles_lock);
9303         if (flags & BTRFS_BLOCK_GROUP_DATA)
9304                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9305         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9306                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9307         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9308                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9309         write_sequnlock(&fs_info->profiles_lock);
9310 }
9311
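/*
 * Remove a block group: delete its free space cache inode and item, drop
 * it from the in-memory caches and sysfs, detach any caching control and,
 * unless a trim is still in flight, remove its extent map before deleting
 * the block group item from the extent tree.  The block group must
 * already be read-only.
 */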
9312 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9313                              struct btrfs_root *root, u64 group_start,
9314                              struct extent_map *em)
9315 {
9316         struct btrfs_path *path;
9317         struct btrfs_block_group_cache *block_group;
9318         struct btrfs_free_cluster *cluster;
9319         struct btrfs_root *tree_root = root->fs_info->tree_root;
9320         struct btrfs_key key;
9321         struct inode *inode;
9322         struct kobject *kobj = NULL;
9323         int ret;
9324         int index;
9325         int factor;
9326         struct btrfs_caching_control *caching_ctl = NULL;
9327         bool remove_em;
9328
9329         root = root->fs_info->extent_root;
9330
9331         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9332         BUG_ON(!block_group);
9333         BUG_ON(!block_group->ro);
9334
9335         /*
9336          * Free the reserved super bytes from this block group before
9337          * removing it.
9338          */
9339         free_excluded_extents(root, block_group);
9340
9341         memcpy(&key, &block_group->key, sizeof(key));
9342         index = get_block_group_index(block_group);
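        /*
         * DUP, RAID1 and RAID10 store two copies of each byte, so their
         * on-disk footprint is twice the logical size; the factor is used
         * below when shrinking space_info->disk_total.
         */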
9343         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9344                                   BTRFS_BLOCK_GROUP_RAID1 |
9345                                   BTRFS_BLOCK_GROUP_RAID10))
9346                 factor = 2;
9347         else
9348                 factor = 1;
9349
9350         /* make sure this block group isn't part of an allocation cluster */
9351         cluster = &root->fs_info->data_alloc_cluster;
9352         spin_lock(&cluster->refill_lock);
9353         btrfs_return_cluster_to_free_space(block_group, cluster);
9354         spin_unlock(&cluster->refill_lock);
9355
9356         /*
9357          * make sure this block group isn't part of a metadata
9358          * allocation cluster
9359          */
9360         cluster = &root->fs_info->meta_alloc_cluster;
9361         spin_lock(&cluster->refill_lock);
9362         btrfs_return_cluster_to_free_space(block_group, cluster);
9363         spin_unlock(&cluster->refill_lock);
9364
9365         path = btrfs_alloc_path();
9366         if (!path) {
9367                 ret = -ENOMEM;
9368                 goto out;
9369         }
9370
9371         inode = lookup_free_space_inode(tree_root, block_group, path);
9372         if (!IS_ERR(inode)) {
9373                 ret = btrfs_orphan_add(trans, inode);
9374                 if (ret) {
9375                         btrfs_add_delayed_iput(inode);
9376                         goto out;
9377                 }
9378                 clear_nlink(inode);
9379                 /* One for the block groups ref */
9380                 spin_lock(&block_group->lock);
9381                 if (block_group->iref) {
9382                         block_group->iref = 0;
9383                         block_group->inode = NULL;
9384                         spin_unlock(&block_group->lock);
9385                         iput(inode);
9386                 } else {
9387                         spin_unlock(&block_group->lock);
9388                 }
9389                 /* One for our lookup ref */
9390                 btrfs_add_delayed_iput(inode);
9391         }
9392
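        /* Delete the free space cache item for this block group, if any. */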
9393         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9394         key.offset = block_group->key.objectid;
9395         key.type = 0;
9396
9397         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9398         if (ret < 0)
9399                 goto out;
9400         if (ret > 0)
9401                 btrfs_release_path(path);
9402         if (ret == 0) {
9403                 ret = btrfs_del_item(trans, tree_root, path);
9404                 if (ret)
9405                         goto out;
9406                 btrfs_release_path(path);
9407         }
9408
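        /* Drop the block group from the in-memory block group cache rbtree. */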
9409         spin_lock(&root->fs_info->block_group_cache_lock);
9410         rb_erase(&block_group->cache_node,
9411                  &root->fs_info->block_group_cache_tree);
9412         RB_CLEAR_NODE(&block_group->cache_node);
9413
9414         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9415                 root->fs_info->first_logical_byte = (u64)-1;
9416         spin_unlock(&root->fs_info->block_group_cache_lock);
9417
9418         down_write(&block_group->space_info->groups_sem);
9419         /*
9420          * We must use list_del_init so others can check whether the block
9421          * group is still on the list after taking the semaphore.
9422          */
9423         list_del_init(&block_group->list);
9424         if (list_empty(&block_group->space_info->block_groups[index])) {
9425                 kobj = block_group->space_info->block_group_kobjs[index];
9426                 block_group->space_info->block_group_kobjs[index] = NULL;
9427                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9428         }
9429         up_write(&block_group->space_info->groups_sem);
9430         if (kobj) {
9431                 kobject_del(kobj);
9432                 kobject_put(kobj);
9433         }
9434
9435         if (block_group->has_caching_ctl)
9436                 caching_ctl = get_caching_control(block_group);
9437         if (block_group->cached == BTRFS_CACHE_STARTED)
9438                 wait_block_group_cache_done(block_group);
9439         if (block_group->has_caching_ctl) {
9440                 down_write(&root->fs_info->commit_root_sem);
9441                 if (!caching_ctl) {
9442                         struct btrfs_caching_control *ctl;
9443
9444                         list_for_each_entry(ctl,
9445                                     &root->fs_info->caching_block_groups, list)
9446                                 if (ctl->block_group == block_group) {
9447                                         caching_ctl = ctl;
9448                                         atomic_inc(&caching_ctl->count);
9449                                         break;
9450                                 }
9451                 }
9452                 if (caching_ctl)
9453                         list_del_init(&caching_ctl->list);
9454                 up_write(&root->fs_info->commit_root_sem);
9455                 if (caching_ctl) {
9456                         /* Once for the caching bgs list and once for us. */
9457                         put_caching_control(caching_ctl);
9458                         put_caching_control(caching_ctl);
9459                 }
9460         }
9461
9462         spin_lock(&trans->transaction->dirty_bgs_lock);
9463         if (!list_empty(&block_group->dirty_list)) {
9464                 list_del_init(&block_group->dirty_list);
9465                 btrfs_put_block_group(block_group);
9466         }
9467         spin_unlock(&trans->transaction->dirty_bgs_lock);
9468
9469         btrfs_remove_free_space_cache(block_group);
9470
9471         spin_lock(&block_group->space_info->lock);
9472         list_del_init(&block_group->ro_list);
9473         block_group->space_info->total_bytes -= block_group->key.offset;
9474         block_group->space_info->bytes_readonly -= block_group->key.offset;
9475         block_group->space_info->disk_total -= block_group->key.offset * factor;
9476         spin_unlock(&block_group->space_info->lock);
9477
9478         memcpy(&key, &block_group->key, sizeof(key));
9479
9480         lock_chunks(root);
9481         if (!list_empty(&em->list)) {
9482                 /* We're in the transaction->pending_chunks list. */
9483                 free_extent_map(em);
9484         }
9485         spin_lock(&block_group->lock);
9486         block_group->removed = 1;
9487         /*
9488          * At this point trimming can't start on this block group, because we
9489          * removed the block group from the tree fs_info->block_group_cache_tree
9490          * so no one can find it anymore, and even if someone already got this
9491          * block group before we removed it from the rbtree, they have already
9492          * incremented block_group->trimming - if they hadn't, they wouldn't find
9493          * any free space entries because we already removed them all when we
9494          * called btrfs_remove_free_space_cache().
9495          *
9496          * And we must not remove the extent map from the fs_info->mapping_tree
9497          * to prevent the same logical address range and physical device space
9498          * ranges from being reused for a new block group. This is because our
9499          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9500          * completely transactionless, so while it is trimming a range the
9501          * currently running transaction might finish and a new one start,
9502          * allowing for new block groups to be created that can reuse the same
9503          * physical device locations unless we take this special care.
9504          */
9505         remove_em = (atomic_read(&block_group->trimming) == 0);
9506         /*
9507          * Make sure a trimmer task always sees the em in the pinned_chunks list
9508          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9509          * before checking block_group->removed).
9510          */
9511         if (!remove_em) {
9512                 /*
9513                  * Our em might be in trans->transaction->pending_chunks which
9514                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9515                  * and so is the fs_info->pinned_chunks list.
9516                  *
9517                  * So at this point we must be holding the chunk_mutex to avoid
9518                  * any races with chunk allocation (more specifically at
9519                  * volumes.c:contains_pending_extent()), to ensure it always
9520                  * sees the em, either in the pending_chunks list or in the
9521                  * pinned_chunks list.
9522                  */
9523                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9524         }
9525         spin_unlock(&block_group->lock);
9526
9527         if (remove_em) {
9528                 struct extent_map_tree *em_tree;
9529
9530                 em_tree = &root->fs_info->mapping_tree.map_tree;
9531                 write_lock(&em_tree->lock);
9532                 /*
9533                  * The em might be in the pending_chunks list, so make sure the
9534                  * chunk mutex is locked, since remove_extent_mapping() will
9535                  * delete us from that list.
9536                  */
9537                 remove_extent_mapping(em_tree, em);
9538                 write_unlock(&em_tree->lock);
9539                 /* once for the tree */
9540                 free_extent_map(em);
9541         }
9542
9543         unlock_chunks(root);
9544
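        /*
         * Drop two references: one from the lookup at the top of this
         * function and one for the block group cache tree we erased it
         * from.
         */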
9545         btrfs_put_block_group(block_group);
9546         btrfs_put_block_group(block_group);
9547
9548         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9549         if (ret > 0)
9550                 ret = -EIO;
9551         if (ret < 0)
9552                 goto out;
9553
9554         ret = btrfs_del_item(trans, root, path);
9555 out:
9556         btrfs_free_path(path);
9557         return ret;
9558 }
9559
9560 /*
9561  * Process the unused_bgs list and remove any block groups that no longer
9562  * have any allocated space in them.
9563  */
9564 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9565 {
9566         struct btrfs_block_group_cache *block_group;
9567         struct btrfs_space_info *space_info;
9568         struct btrfs_root *root = fs_info->extent_root;
9569         struct btrfs_trans_handle *trans;
9570         int ret = 0;
9571
9572         if (!fs_info->open)
9573                 return;
9574
9575         spin_lock(&fs_info->unused_bgs_lock);
9576         while (!list_empty(&fs_info->unused_bgs)) {
9577                 u64 start, end;
9578
9579                 block_group = list_first_entry(&fs_info->unused_bgs,
9580                                                struct btrfs_block_group_cache,
9581                                                bg_list);
9582                 space_info = block_group->space_info;
9583                 list_del_init(&block_group->bg_list);
9584                 if (ret || btrfs_mixed_space_info(space_info)) {
9585                         btrfs_put_block_group(block_group);
9586                         continue;
9587                 }
9588                 spin_unlock(&fs_info->unused_bgs_lock);
9589
9590                 /* Don't want to race with allocators so take the groups_sem */
9591                 down_write(&space_info->groups_sem);
9592                 spin_lock(&block_group->lock);
9593                 if (block_group->reserved ||
9594                     btrfs_block_group_used(&block_group->item) ||
9595                     block_group->ro) {
9596                         /*
9597                          * We want to bail if we made new allocations or have
9598                          * outstanding allocations in this block group.  We do
9599                          * the ro check in case balance is currently acting on
9600                          * this block group.
9601                          */
9602                         spin_unlock(&block_group->lock);
9603                         up_write(&space_info->groups_sem);
9604                         goto next;
9605                 }
9606                 spin_unlock(&block_group->lock);
9607
9608                 /* We don't want to force the issue, only flip to RO if it's ok. */
9609                 ret = set_block_group_ro(block_group, 0);
9610                 up_write(&space_info->groups_sem);
9611                 if (ret < 0) {
9612                         ret = 0;
9613                         goto next;
9614                 }
9615
9616                 /*
9617                  * We want to do this before anything else so we can recover
9618                  * properly if we fail to join the transaction.
9619                  */
9620                 /* 1 for btrfs_orphan_reserve_metadata() */
9621                 trans = btrfs_start_transaction(root, 1);
9622                 if (IS_ERR(trans)) {
9623                         btrfs_set_block_group_rw(root, block_group);
9624                         ret = PTR_ERR(trans);
9625                         goto next;
9626                 }
9627
9628                 /*
9629                  * We could have pending pinned extents for this block group,
9630                  * just delete them, we don't care about them anymore.
9631                  */
9632                 start = block_group->key.objectid;
9633                 end = start + block_group->key.offset - 1;
9634                 /*
9635                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
9636                  * btrfs_finish_extent_commit(). If we are at transaction N,
9637                  * another task might be running finish_extent_commit() for the
9638                  * previous transaction N - 1, and have seen a range belonging
9639                  * to the block group in freed_extents[] before we were able to
9640                  * clear the whole block group range from freed_extents[]. This
9641                  * means that task can look up the block group after we have
9642                  * unpinned it from freed_extents[] and removed it, leading to
9643                  * a BUG_ON() at btrfs_unpin_extent_range().
9644                  */
9645                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9646                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9647                                   EXTENT_DIRTY, GFP_NOFS);
9648                 if (ret) {
9649                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9650                         btrfs_set_block_group_rw(root, block_group);
9651                         goto end_trans;
9652                 }
9653                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9654                                   EXTENT_DIRTY, GFP_NOFS);
9655                 if (ret) {
9656                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9657                         btrfs_set_block_group_rw(root, block_group);
9658                         goto end_trans;
9659                 }
9660                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9661
9662                 /* Reset pinned so btrfs_put_block_group doesn't complain */
9663                 block_group->pinned = 0;
9664
9665                 /*
9666                  * btrfs_remove_chunk() will abort the transaction if things go
9667                  * horribly wrong.
9668                  */
9669                 ret = btrfs_remove_chunk(trans, root,
9670                                          block_group->key.objectid);
9671 end_trans:
9672                 btrfs_end_transaction(trans, root);
9673 next:
9674                 btrfs_put_block_group(block_group);
9675                 spin_lock(&fs_info->unused_bgs_lock);
9676         }
9677         spin_unlock(&fs_info->unused_bgs_lock);
9678 }
9679
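/*
 * Create the empty space_info entries every filesystem needs: SYSTEM,
 * plus either a combined METADATA|DATA entry for mixed block groups or
 * separate METADATA and DATA entries.
 */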
9680 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9681 {
9682         struct btrfs_space_info *space_info;
9683         struct btrfs_super_block *disk_super;
9684         u64 features;
9685         u64 flags;
9686         int mixed = 0;
9687         int ret;
9688
9689         disk_super = fs_info->super_copy;
9690         if (!btrfs_super_root(disk_super))
9691                 return 1;
9692
9693         features = btrfs_super_incompat_flags(disk_super);
9694         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9695                 mixed = 1;
9696
9697         flags = BTRFS_BLOCK_GROUP_SYSTEM;
9698         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9699         if (ret)
9700                 goto out;
9701
9702         if (mixed) {
9703                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9704                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9705         } else {
9706                 flags = BTRFS_BLOCK_GROUP_METADATA;
9707                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9708                 if (ret)
9709                         goto out;
9710
9711                 flags = BTRFS_BLOCK_GROUP_DATA;
9712                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9713         }
9714 out:
9715         return ret;
9716 }
9717
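/*
 * Unpin a range on the transaction-abort path, without adding the space
 * back to the in-memory free space caches (the "false" argument).
 */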
9718 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9719 {
9720         return unpin_extent_range(root, start, end, false);
9721 }
9722
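/*
 * Implement FITRIM (see btrfs_ioctl_fitrim()): walk all block groups that
 * overlap [range->start, range->start + range->len), trim their free space
 * and return the total number of bytes trimmed via range->len.
 */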
9723 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9724 {
9725         struct btrfs_fs_info *fs_info = root->fs_info;
9726         struct btrfs_block_group_cache *cache = NULL;
9727         u64 group_trimmed;
9728         u64 start;
9729         u64 end;
9730         u64 trimmed = 0;
9731         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9732         int ret = 0;
9733
9734         /*
9735          * Try to trim all FS space; our first block group may start from a non-zero offset.
9736          */
9737         if (range->len == total_bytes)
9738                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9739         else
9740                 cache = btrfs_lookup_block_group(fs_info, range->start);
9741
9742         while (cache) {
9743                 if (cache->key.objectid >= (range->start + range->len)) {
9744                         btrfs_put_block_group(cache);
9745                         break;
9746                 }
9747
9748                 start = max(range->start, cache->key.objectid);
9749                 end = min(range->start + range->len,
9750                                 cache->key.objectid + cache->key.offset);
9751
9752                 if (end - start >= range->minlen) {
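                        /*
                         * Free space is only known once caching of the
                         * block group has finished, so start it and wait
                         * before trimming.
                         */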
9753                         if (!block_group_cache_done(cache)) {
9754                                 ret = cache_block_group(cache, 0);
9755                                 if (ret) {
9756                                         btrfs_put_block_group(cache);
9757                                         break;
9758                                 }
9759                                 ret = wait_block_group_cache_done(cache);
9760                                 if (ret) {
9761                                         btrfs_put_block_group(cache);
9762                                         break;
9763                                 }
9764                         }
9765                         ret = btrfs_trim_block_group(cache,
9766                                                      &group_trimmed,
9767                                                      start,
9768                                                      end,
9769                                                      range->minlen);
9770
9771                         trimmed += group_trimmed;
9772                         if (ret) {
9773                                 btrfs_put_block_group(cache);
9774                                 break;
9775                         }
9776                 }
9777
9778                 cache = next_block_group(fs_info->tree_root, cache);
9779         }
9780
9781         range->len = trimmed;
9782         return ret;
9783 }
9784
9785 /*
9786  * btrfs_{start,end}_write_no_snapshoting() are similar to
9787  * mnt_{want,drop}_write(); they are used to prevent some tasks from writing
9788  * data into the page cache through nocow before the subvolume is snapshotted
9789  * and only flushing it to disk after the snapshot is created, and to prevent
9790  * operations that would make the snapshot inconsistent while snapshotting is
9791  * ongoing (writes followed by expanding truncates, for example).
9792  */
9793 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
9794 {
9795         percpu_counter_dec(&root->subv_writers->counter);
9796         /*
9797          * Make sure counter is updated before we wake up
9798          * waiters.
9799          */
9800         smp_mb();
9801         if (waitqueue_active(&root->subv_writers->wait))
9802                 wake_up(&root->subv_writers->wait);
9803 }
9804
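/*
 * Returns 0 if a snapshot is pending and the nocow write must not proceed,
 * or 1 after taking a writer reference that the caller must drop with
 * btrfs_end_write_no_snapshoting().  A sketch of the expected usage
 * (hypothetical caller):
 *
 *	if (!btrfs_start_write_no_snapshoting(root))
 *		fall back to the cow path;
 *	... do the nocow write ...
 *	btrfs_end_write_no_snapshoting(root);
 */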
9805 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
9806 {
9807         if (atomic_read(&root->will_be_snapshoted))
9808                 return 0;
9809
9810         percpu_counter_inc(&root->subv_writers->counter);
9811         /*
9812          * Make sure counter is updated before we check for snapshot creation.
9813          */
9814         smp_mb();
9815         if (atomic_read(&root->will_be_snapshoted)) {
9816                 btrfs_end_write_no_snapshoting(root);
9817                 return 0;
9818         }
9819         return 1;
9820 }