Btrfs: don't commit the transaction in the async space flushing
fs/btrfs/extent-tree.c [cascardo/linux.git]
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
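
/*
 * Illustrative usage (editorial sketch, not in the original file): ordinary
 * allocation paths that merely prefer a fresh chunk pass
 * CHUNK_ALLOC_NO_FORCE, while flushing paths that must create space can ask
 * for one unconditionally, e.g.
 *
 *     ret = do_chunk_alloc(trans, extent_root,
 *                          btrfs_get_alloc_profile(extent_root, 0),
 *                          CHUNK_ALLOC_FORCE);
 */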

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
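
/*
 * Illustrative usage (editorial sketch): the allocator charges a new
 * reservation to a block group with
 *
 *     btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *
 * and hands the bytes back with RESERVE_FREE once the extent is either
 * inserted into the extent tree or abandoned.
 */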

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op,
                                int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
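
/*
 * Worked example for the clamping above (editorial): with a block group
 * covering [16M, 16M + 1G) and a super block stripe starting at logical
 * 16M - 4K with stripe_len == 64K, the stripe straddles the start of the
 * group, so start is clamped to 16M and len becomes
 * (16M - 4K + 64K) - 16M = 60K; only the overlapping 60K is excluded and
 * added to bytes_super.
 */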

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
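
/*
 * Worked example (editorial): caching the range [0, 100M) while the pinned
 * tree holds a single extent covering [30M, 50M]: the first iteration adds
 * [0, 30M) as free space and advances start to just past the pinned range;
 * no further pinned extent is found, so the loop exits and the tail adds
 * the remainder up to 100M, leaving only the pinned bytes unaccounted.
 */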

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info and then
         * some other thread starts a transaction commit which tries to do
         * an allocation while the first thread is still loading the space
         * cache info.  The previous loop should have kept us from choosing
         * this block group, but if we've moved to the state where we will
         * wait on caching block groups, we need to first check if we're
         * doing a fast load here, so we can wait for it to finish;
         * otherwise we could end up allocating from a block group whose
         * cache gets evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what the
 * reference count and extent flags would be if all of the delayed refs
 * were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again.
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
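
/*
 * Worked example (editorial): if the extent item on disk records 2
 * references and a delayed ref head is queued with ref_mod == +1 (an add
 * that has not run yet), the function above reports *refs == 3, the value
 * the extent tree will show once the delayed refs are processed.
 */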

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * searching the b-tree.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  The full back ref is actually generic, and
 * can be used in all cases where the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead: every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in the
 * tree block info structure.
 */

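/*
 * Worked example of the key composition above (editorial): a 1M file
 * extent starting at logical byte 12582912, implicitly referenced by root
 * 5, inode 257, file offset 0, is described by the extent item
 *
 *     (12582912, BTRFS_EXTENT_ITEM_KEY, 1048576)
 *
 * plus an implicit backref item keyed as
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * where the hash is computed by hash_extent_data_ref() below.  Were the
 * same extent referenced through a full backref instead, the key offset
 * would be the bytenr of the parent block:
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, <parent bytenr>)
 */
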
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
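
/*
 * Note on the combination above (editorial): root_objectid feeds one crc32c
 * stream (high_crc) while owner and offset are chained into a second
 * (low_crc), and the two are merged with a 31-bit, not 32-bit, shift.  The
 * one bit of overlap looks like an accident, but the result is stored as
 * an on-disk key offset, so the 31 must be kept for compatibility with
 * existing filesystems.
 */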
1076
1077 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1078                                      struct btrfs_extent_data_ref *ref)
1079 {
1080         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1081                                     btrfs_extent_data_ref_objectid(leaf, ref),
1082                                     btrfs_extent_data_ref_offset(leaf, ref));
1083 }
1084
1085 static int match_extent_data_ref(struct extent_buffer *leaf,
1086                                  struct btrfs_extent_data_ref *ref,
1087                                  u64 root_objectid, u64 owner, u64 offset)
1088 {
1089         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1090             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1091             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1092                 return 0;
1093         return 1;
1094 }
1095
1096 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1097                                            struct btrfs_root *root,
1098                                            struct btrfs_path *path,
1099                                            u64 bytenr, u64 parent,
1100                                            u64 root_objectid,
1101                                            u64 owner, u64 offset)
1102 {
1103         struct btrfs_key key;
1104         struct btrfs_extent_data_ref *ref;
1105         struct extent_buffer *leaf;
1106         u32 nritems;
1107         int ret;
1108         int recow;
1109         int err = -ENOENT;
1110
1111         key.objectid = bytenr;
1112         if (parent) {
1113                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1114                 key.offset = parent;
1115         } else {
1116                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1117                 key.offset = hash_extent_data_ref(root_objectid,
1118                                                   owner, offset);
1119         }
1120 again:
1121         recow = 0;
1122         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1123         if (ret < 0) {
1124                 err = ret;
1125                 goto fail;
1126         }
1127
1128         if (parent) {
1129                 if (!ret)
1130                         return 0;
1131 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1132                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1133                 btrfs_release_path(path);
1134                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1135                 if (ret < 0) {
1136                         err = ret;
1137                         goto fail;
1138                 }
1139                 if (!ret)
1140                         return 0;
1141 #endif
1142                 goto fail;
1143         }
1144
1145         leaf = path->nodes[0];
1146         nritems = btrfs_header_nritems(leaf);
1147         while (1) {
1148                 if (path->slots[0] >= nritems) {
1149                         ret = btrfs_next_leaf(root, path);
1150                         if (ret < 0)
1151                                 err = ret;
1152                         if (ret)
1153                                 goto fail;
1154
1155                         leaf = path->nodes[0];
1156                         nritems = btrfs_header_nritems(leaf);
1157                         recow = 1;
1158                 }
1159
1160                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1161                 if (key.objectid != bytenr ||
1162                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1163                         goto fail;
1164
1165                 ref = btrfs_item_ptr(leaf, path->slots[0],
1166                                      struct btrfs_extent_data_ref);
1167
1168                 if (match_extent_data_ref(leaf, ref, root_objectid,
1169                                           owner, offset)) {
1170                         if (recow) {
1171                                 btrfs_release_path(path);
1172                                 goto again;
1173                         }
1174                         err = 0;
1175                         break;
1176                 }
1177                 path->slots[0]++;
1178         }
1179 fail:
1180         return err;
1181 }
1182
1183 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1184                                            struct btrfs_root *root,
1185                                            struct btrfs_path *path,
1186                                            u64 bytenr, u64 parent,
1187                                            u64 root_objectid, u64 owner,
1188                                            u64 offset, int refs_to_add)
1189 {
1190         struct btrfs_key key;
1191         struct extent_buffer *leaf;
1192         u32 size;
1193         u32 num_refs;
1194         int ret;
1195
1196         key.objectid = bytenr;
1197         if (parent) {
1198                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1199                 key.offset = parent;
1200                 size = sizeof(struct btrfs_shared_data_ref);
1201         } else {
1202                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1203                 key.offset = hash_extent_data_ref(root_objectid,
1204                                                   owner, offset);
1205                 size = sizeof(struct btrfs_extent_data_ref);
1206         }
1207
1208         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1209         if (ret && ret != -EEXIST)
1210                 goto fail;
1211
1212         leaf = path->nodes[0];
1213         if (parent) {
1214                 struct btrfs_shared_data_ref *ref;
1215                 ref = btrfs_item_ptr(leaf, path->slots[0],
1216                                      struct btrfs_shared_data_ref);
1217                 if (ret == 0) {
1218                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1219                 } else {
1220                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1221                         num_refs += refs_to_add;
1222                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1223                 }
1224         } else {
1225                 struct btrfs_extent_data_ref *ref;
1226                 while (ret == -EEXIST) {
1227                         ref = btrfs_item_ptr(leaf, path->slots[0],
1228                                              struct btrfs_extent_data_ref);
1229                         if (match_extent_data_ref(leaf, ref, root_objectid,
1230                                                   owner, offset))
1231                                 break;
1232                         btrfs_release_path(path);
1233                         key.offset++;
1234                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1235                                                       size);
1236                         if (ret && ret != -EEXIST)
1237                                 goto fail;
1238
1239                         leaf = path->nodes[0];
1240                 }
1241                 ref = btrfs_item_ptr(leaf, path->slots[0],
1242                                      struct btrfs_extent_data_ref);
1243                 if (ret == 0) {
1244                         btrfs_set_extent_data_ref_root(leaf, ref,
1245                                                        root_objectid);
1246                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1247                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1248                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1249                 } else {
1250                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1251                         num_refs += refs_to_add;
1252                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1253                 }
1254         }
1255         btrfs_mark_buffer_dirty(leaf);
1256         ret = 0;
1257 fail:
1258         btrfs_release_path(path);
1259         return ret;
1260 }
1261
1262 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1263                                            struct btrfs_root *root,
1264                                            struct btrfs_path *path,
1265                                            int refs_to_drop, int *last_ref)
1266 {
1267         struct btrfs_key key;
1268         struct btrfs_extent_data_ref *ref1 = NULL;
1269         struct btrfs_shared_data_ref *ref2 = NULL;
1270         struct extent_buffer *leaf;
1271         u32 num_refs = 0;
1272         int ret = 0;
1273
1274         leaf = path->nodes[0];
1275         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1276
1277         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1278                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1279                                       struct btrfs_extent_data_ref);
1280                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1281         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1282                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1283                                       struct btrfs_shared_data_ref);
1284                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1285 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1286         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1287                 struct btrfs_extent_ref_v0 *ref0;
1288                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1289                                       struct btrfs_extent_ref_v0);
1290                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1291 #endif
1292         } else {
1293                 BUG();
1294         }
1295
1296         BUG_ON(num_refs < refs_to_drop);
1297         num_refs -= refs_to_drop;
1298
1299         if (num_refs == 0) {
1300                 ret = btrfs_del_item(trans, root, path);
1301                 *last_ref = 1;
1302         } else {
1303                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1304                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1305                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1306                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1307 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1308                 else {
1309                         struct btrfs_extent_ref_v0 *ref0;
1310                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1311                                         struct btrfs_extent_ref_v0);
1312                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1313                 }
1314 #endif
1315                 btrfs_mark_buffer_dirty(leaf);
1316         }
1317         return ret;
1318 }
1319
1320 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1321                                           struct btrfs_path *path,
1322                                           struct btrfs_extent_inline_ref *iref)
1323 {
1324         struct btrfs_key key;
1325         struct extent_buffer *leaf;
1326         struct btrfs_extent_data_ref *ref1;
1327         struct btrfs_shared_data_ref *ref2;
1328         u32 num_refs = 0;
1329
1330         leaf = path->nodes[0];
1331         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1332         if (iref) {
1333                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1334                     BTRFS_EXTENT_DATA_REF_KEY) {
1335                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1336                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1337                 } else {
1338                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1339                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1340                 }
1341         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1342                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1343                                       struct btrfs_extent_data_ref);
1344                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1346                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1347                                       struct btrfs_shared_data_ref);
1348                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1349 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1350         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1351                 struct btrfs_extent_ref_v0 *ref0;
1352                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1353                                       struct btrfs_extent_ref_v0);
1354                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1355 #endif
1356         } else {
1357                 WARN_ON(1);
1358         }
1359         return num_refs;
1360 }
1361
1362 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1363                                           struct btrfs_root *root,
1364                                           struct btrfs_path *path,
1365                                           u64 bytenr, u64 parent,
1366                                           u64 root_objectid)
1367 {
1368         struct btrfs_key key;
1369         int ret;
1370
1371         key.objectid = bytenr;
1372         if (parent) {
1373                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1374                 key.offset = parent;
1375         } else {
1376                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1377                 key.offset = root_objectid;
1378         }
1379
1380         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1381         if (ret > 0)
1382                 ret = -ENOENT;
1383 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1384         if (ret == -ENOENT && parent) {
1385                 btrfs_release_path(path);
1386                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1387                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388                 if (ret > 0)
1389                         ret = -ENOENT;
1390         }
1391 #endif
1392         return ret;
1393 }
1394
1395 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1396                                           struct btrfs_root *root,
1397                                           struct btrfs_path *path,
1398                                           u64 bytenr, u64 parent,
1399                                           u64 root_objectid)
1400 {
1401         struct btrfs_key key;
1402         int ret;
1403
1404         key.objectid = bytenr;
1405         if (parent) {
1406                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1407                 key.offset = parent;
1408         } else {
1409                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1410                 key.offset = root_objectid;
1411         }
1412
1413         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1414         btrfs_release_path(path);
1415         return ret;
1416 }
1417
1418 static inline int extent_ref_type(u64 parent, u64 owner)
1419 {
1420         int type;
1421         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1422                 if (parent > 0)
1423                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1424                 else
1425                         type = BTRFS_TREE_BLOCK_REF_KEY;
1426         } else {
1427                 if (parent > 0)
1428                         type = BTRFS_SHARED_DATA_REF_KEY;
1429                 else
1430                         type = BTRFS_EXTENT_DATA_REF_KEY;
1431         }
1432         return type;
1433 }
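
/*
 * The mapping above, tabulated (illustration; derived directly from the
 * branches of extent_ref_type()):
 *
 *                      parent == 0                 parent > 0
 *   tree block   BTRFS_TREE_BLOCK_REF_KEY   BTRFS_SHARED_BLOCK_REF_KEY
 *   data extent  BTRFS_EXTENT_DATA_REF_KEY  BTRFS_SHARED_DATA_REF_KEY
 *
 * "tree block" means owner < BTRFS_FIRST_FREE_OBJECTID, i.e. the owner
 * encodes a tree level rather than an inode number.
 */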
1434
1435 static int find_next_key(struct btrfs_path *path, int level,
1436                          struct btrfs_key *key)
1438 {
1439         for (; level < BTRFS_MAX_LEVEL; level++) {
1440                 if (!path->nodes[level])
1441                         break;
1442                 if (path->slots[level] + 1 >=
1443                     btrfs_header_nritems(path->nodes[level]))
1444                         continue;
1445                 if (level == 0)
1446                         btrfs_item_key_to_cpu(path->nodes[level], key,
1447                                               path->slots[level] + 1);
1448                 else
1449                         btrfs_node_key_to_cpu(path->nodes[level], key,
1450                                               path->slots[level] + 1);
1451                 return 0;
1452         }
1453         return 1;
1454 }
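
/*
 * Example walk (hypothetical path state, for illustration): if the slot
 * at level 0 is the last item of its leaf but path->nodes[1] still has a
 * slot to the right, the "continue" skips the exhausted leaf and the key
 * returned is the level 1 key at slots[1] + 1, i.e. the first key of the
 * next leaf.  Only when every level on the path is exhausted (or absent)
 * does the function return 1.
 */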
1455
1456 /*
1457  * look for the inline back ref. if the back ref is found, *ref_ret is
1458  * set to the address of the inline back ref, and 0 is returned.
1459  *
1460  * if the back ref isn't found, *ref_ret is set to the address where it
1461  * should be inserted, and -ENOENT is returned.
1462  *
1463  * if insert is true and there are too many inline back refs, the path
1464  * points to the extent item, and -EAGAIN is returned.
1465  *
1466  * NOTE: inline back refs are ordered in the same way that back ref
1467  *       items in the tree are ordered.
1468  */
1469 static noinline_for_stack
1470 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1471                                  struct btrfs_root *root,
1472                                  struct btrfs_path *path,
1473                                  struct btrfs_extent_inline_ref **ref_ret,
1474                                  u64 bytenr, u64 num_bytes,
1475                                  u64 parent, u64 root_objectid,
1476                                  u64 owner, u64 offset, int insert)
1477 {
1478         struct btrfs_key key;
1479         struct extent_buffer *leaf;
1480         struct btrfs_extent_item *ei;
1481         struct btrfs_extent_inline_ref *iref;
1482         u64 flags;
1483         u64 item_size;
1484         unsigned long ptr;
1485         unsigned long end;
1486         int extra_size;
1487         int type;
1488         int want;
1489         int ret;
1490         int err = 0;
1491         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1492                                                  SKINNY_METADATA);
1493
1494         key.objectid = bytenr;
1495         key.type = BTRFS_EXTENT_ITEM_KEY;
1496         key.offset = num_bytes;
1497
1498         want = extent_ref_type(parent, owner);
1499         if (insert) {
1500                 extra_size = btrfs_extent_inline_ref_size(want);
1501                 path->keep_locks = 1;
1502         } else
1503                 extra_size = -1;
1504
1505         /*
1506          * Owner is our parent level, so we can just add one to get the level
1507          * for the block we are interested in.
1508          */
1509         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1510                 key.type = BTRFS_METADATA_ITEM_KEY;
1511                 key.offset = owner;
1512         }
1513
1514 again:
1515         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1516         if (ret < 0) {
1517                 err = ret;
1518                 goto out;
1519         }
1520
1521         /*
1522          * We may be a newly converted file system which still has the old fat
1523          * extent entries for metadata, so try and see if we have one of those.
1524          */
1525         if (ret > 0 && skinny_metadata) {
1526                 skinny_metadata = false;
1527                 if (path->slots[0]) {
1528                         path->slots[0]--;
1529                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1530                                               path->slots[0]);
1531                         if (key.objectid == bytenr &&
1532                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1533                             key.offset == num_bytes)
1534                                 ret = 0;
1535                 }
1536                 if (ret) {
1537                         key.objectid = bytenr;
1538                         key.type = BTRFS_EXTENT_ITEM_KEY;
1539                         key.offset = num_bytes;
1540                         btrfs_release_path(path);
1541                         goto again;
1542                 }
1543         }
1544
1545         if (ret && !insert) {
1546                 err = -ENOENT;
1547                 goto out;
1548         } else if (WARN_ON(ret)) {
1549                 err = -EIO;
1550                 goto out;
1551         }
1552
1553         leaf = path->nodes[0];
1554         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1555 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1556         if (item_size < sizeof(*ei)) {
1557                 if (!insert) {
1558                         err = -ENOENT;
1559                         goto out;
1560                 }
1561                 ret = convert_extent_item_v0(trans, root, path, owner,
1562                                              extra_size);
1563                 if (ret < 0) {
1564                         err = ret;
1565                         goto out;
1566                 }
1567                 leaf = path->nodes[0];
1568                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1569         }
1570 #endif
1571         BUG_ON(item_size < sizeof(*ei));
1572
1573         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1574         flags = btrfs_extent_flags(leaf, ei);
1575
1576         ptr = (unsigned long)(ei + 1);
1577         end = (unsigned long)ei + item_size;
1578
1579         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1580                 ptr += sizeof(struct btrfs_tree_block_info);
1581                 BUG_ON(ptr > end);
1582         }
1583
1584         err = -ENOENT;
1585         while (1) {
1586                 if (ptr >= end) {
1587                         WARN_ON(ptr > end);
1588                         break;
1589                 }
1590                 iref = (struct btrfs_extent_inline_ref *)ptr;
1591                 type = btrfs_extent_inline_ref_type(leaf, iref);
1592                 if (want < type)
1593                         break;
1594                 if (want > type) {
1595                         ptr += btrfs_extent_inline_ref_size(type);
1596                         continue;
1597                 }
1598
1599                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1600                         struct btrfs_extent_data_ref *dref;
1601                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1602                         if (match_extent_data_ref(leaf, dref, root_objectid,
1603                                                   owner, offset)) {
1604                                 err = 0;
1605                                 break;
1606                         }
1607                         if (hash_extent_data_ref_item(leaf, dref) <
1608                             hash_extent_data_ref(root_objectid, owner, offset))
1609                                 break;
1610                 } else {
1611                         u64 ref_offset;
1612                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1613                         if (parent > 0) {
1614                                 if (parent == ref_offset) {
1615                                         err = 0;
1616                                         break;
1617                                 }
1618                                 if (ref_offset < parent)
1619                                         break;
1620                         } else {
1621                                 if (root_objectid == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < root_objectid)
1626                                         break;
1627                         }
1628                 }
1629                 ptr += btrfs_extent_inline_ref_size(type);
1630         }
1631         if (err == -ENOENT && insert) {
1632                 if (item_size + extra_size >=
1633                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1634                         err = -EAGAIN;
1635                         goto out;
1636                 }
1637                 /*
1638                  * To add a new inline back ref, we have to make sure
1639                  * there is no corresponding back ref item.
1640                  * For simplicity, we just do not add a new inline back
1641                  * ref if there is any kind of item for this block.
1642                  */
1643                 if (find_next_key(path, 0, &key) == 0 &&
1644                     key.objectid == bytenr &&
1645                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1646                         err = -EAGAIN;
1647                         goto out;
1648                 }
1649         }
1650         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1651 out:
1652         if (insert) {
1653                 path->keep_locks = 0;
1654                 btrfs_unlock_up_safe(path, 1);
1655         }
1656         return err;
1657 }
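
/*
 * Typical caller pattern (sketch; it mirrors insert_inline_extent_backref()
 * below, with most arguments elided):
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, ...);
 *	if (ret == 0)			-> found, modify iref in place
 *		update_inline_extent_backref(root, path, iref, ...);
 *	else if (ret == -ENOENT)	-> iref is the insertion point
 *		setup_inline_extent_backref(root, path, iref, ...);
 *
 * -EAGAIN means there was no room inline and the caller must fall back
 * to a separate keyed backref item.
 */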
1658
1659 /*
1660  * helper to add a new inline back ref
1661  */
1662 static noinline_for_stack
1663 void setup_inline_extent_backref(struct btrfs_root *root,
1664                                  struct btrfs_path *path,
1665                                  struct btrfs_extent_inline_ref *iref,
1666                                  u64 parent, u64 root_objectid,
1667                                  u64 owner, u64 offset, int refs_to_add,
1668                                  struct btrfs_delayed_extent_op *extent_op)
1669 {
1670         struct extent_buffer *leaf;
1671         struct btrfs_extent_item *ei;
1672         unsigned long ptr;
1673         unsigned long end;
1674         unsigned long item_offset;
1675         u64 refs;
1676         int size;
1677         int type;
1678
1679         leaf = path->nodes[0];
1680         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681         item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683         type = extent_ref_type(parent, owner);
1684         size = btrfs_extent_inline_ref_size(type);
1685
1686         btrfs_extend_item(root, path, size);
1687
1688         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689         refs = btrfs_extent_refs(leaf, ei);
1690         refs += refs_to_add;
1691         btrfs_set_extent_refs(leaf, ei, refs);
1692         if (extent_op)
1693                 __run_delayed_extent_op(extent_op, leaf, ei);
1694
1695         ptr = (unsigned long)ei + item_offset;
1696         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697         if (ptr < end - size)
1698                 memmove_extent_buffer(leaf, ptr + size, ptr,
1699                                       end - size - ptr);
1700
1701         iref = (struct btrfs_extent_inline_ref *)ptr;
1702         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704                 struct btrfs_extent_data_ref *dref;
1705                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711                 struct btrfs_shared_data_ref *sref;
1712                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717         } else {
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719         }
1720         btrfs_mark_buffer_dirty(leaf);
1721 }
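
/*
 * Worked example of the gap-opening memmove above (offsets hypothetical,
 * for illustration): take a non-skinny tree block item, i.e. a 24 byte
 * extent item plus an 18 byte btrfs_tree_block_info, with one existing
 * 9 byte inline ref, so item_offset == 42 and the old item size is 51.
 * Inserting another size == 9 shared block ref before the existing one,
 * btrfs_extend_item() grows the item to 60 bytes and the memmove shifts
 * bytes [42, 51) up to [51, 60), leaving a 9 byte hole at offset 42 for
 * the new inline ref so the sort order is preserved.
 */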
1722
1723 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724                                  struct btrfs_root *root,
1725                                  struct btrfs_path *path,
1726                                  struct btrfs_extent_inline_ref **ref_ret,
1727                                  u64 bytenr, u64 num_bytes, u64 parent,
1728                                  u64 root_objectid, u64 owner, u64 offset)
1729 {
1730         int ret;
1731
1732         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733                                            bytenr, num_bytes, parent,
1734                                            root_objectid, owner, offset, 0);
1735         if (ret != -ENOENT)
1736                 return ret;
1737
1738         btrfs_release_path(path);
1739         *ref_ret = NULL;
1740
1741         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743                                             root_objectid);
1744         } else {
1745                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746                                              root_objectid, owner, offset);
1747         }
1748         return ret;
1749 }
1750
1751 /*
1752  * helper to update/remove an inline back ref
1753  */
1754 static noinline_for_stack
1755 void update_inline_extent_backref(struct btrfs_root *root,
1756                                   struct btrfs_path *path,
1757                                   struct btrfs_extent_inline_ref *iref,
1758                                   int refs_to_mod,
1759                                   struct btrfs_delayed_extent_op *extent_op,
1760                                   int *last_ref)
1761 {
1762         struct extent_buffer *leaf;
1763         struct btrfs_extent_item *ei;
1764         struct btrfs_extent_data_ref *dref = NULL;
1765         struct btrfs_shared_data_ref *sref = NULL;
1766         unsigned long ptr;
1767         unsigned long end;
1768         u32 item_size;
1769         int size;
1770         int type;
1771         u64 refs;
1772
1773         leaf = path->nodes[0];
1774         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1775         refs = btrfs_extent_refs(leaf, ei);
1776         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1777         refs += refs_to_mod;
1778         btrfs_set_extent_refs(leaf, ei, refs);
1779         if (extent_op)
1780                 __run_delayed_extent_op(extent_op, leaf, ei);
1781
1782         type = btrfs_extent_inline_ref_type(leaf, iref);
1783
1784         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1785                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1786                 refs = btrfs_extent_data_ref_count(leaf, dref);
1787         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1789                 refs = btrfs_shared_data_ref_count(leaf, sref);
1790         } else {
1791                 refs = 1;
1792                 BUG_ON(refs_to_mod != -1);
1793         }
1794
1795         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1796         refs += refs_to_mod;
1797
1798         if (refs > 0) {
1799                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1800                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1801                 else
1802                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1803         } else {
1804                 *last_ref = 1;
1805                 size = btrfs_extent_inline_ref_size(type);
1806                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1807                 ptr = (unsigned long)iref;
1808                 end = (unsigned long)ei + item_size;
1809                 if (ptr + size < end)
1810                         memmove_extent_buffer(leaf, ptr, ptr + size,
1811                                               end - ptr - size);
1812                 item_size -= size;
1813                 btrfs_truncate_item(root, path, item_size, 1);
1814         }
1815         btrfs_mark_buffer_dirty(leaf);
1816 }
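
/*
 * The removal branch above, worked through (offsets hypothetical, for
 * illustration): in a 60 byte item holding two 9 byte inline refs at
 * offsets 42 and 51, dropping the ref at 42 moves the tail [51, 60)
 * down to [42, 51), then btrfs_truncate_item() shrinks the item to 51
 * bytes.  If the dropped ref was the last thing in the item, no memmove
 * is needed at all.
 */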
1817
1818 static noinline_for_stack
1819 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1820                                  struct btrfs_root *root,
1821                                  struct btrfs_path *path,
1822                                  u64 bytenr, u64 num_bytes, u64 parent,
1823                                  u64 root_objectid, u64 owner,
1824                                  u64 offset, int refs_to_add,
1825                                  struct btrfs_delayed_extent_op *extent_op)
1826 {
1827         struct btrfs_extent_inline_ref *iref;
1828         int ret;
1829
1830         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1831                                            bytenr, num_bytes, parent,
1832                                            root_objectid, owner, offset, 1);
1833         if (ret == 0) {
1834                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1835                 update_inline_extent_backref(root, path, iref,
1836                                              refs_to_add, extent_op, NULL);
1837         } else if (ret == -ENOENT) {
1838                 setup_inline_extent_backref(root, path, iref, parent,
1839                                             root_objectid, owner, offset,
1840                                             refs_to_add, extent_op);
1841                 ret = 0;
1842         }
1843         return ret;
1844 }
1845
1846 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1847                                  struct btrfs_root *root,
1848                                  struct btrfs_path *path,
1849                                  u64 bytenr, u64 parent, u64 root_objectid,
1850                                  u64 owner, u64 offset, int refs_to_add)
1851 {
1852         int ret;
1853         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1854                 BUG_ON(refs_to_add != 1);
1855                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1856                                             parent, root_objectid);
1857         } else {
1858                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1859                                              parent, root_objectid,
1860                                              owner, offset, refs_to_add);
1861         }
1862         return ret;
1863 }
1864
1865 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1866                                  struct btrfs_root *root,
1867                                  struct btrfs_path *path,
1868                                  struct btrfs_extent_inline_ref *iref,
1869                                  int refs_to_drop, int is_data, int *last_ref)
1870 {
1871         int ret = 0;
1872
1873         BUG_ON(!is_data && refs_to_drop != 1);
1874         if (iref) {
1875                 update_inline_extent_backref(root, path, iref,
1876                                              -refs_to_drop, NULL, last_ref);
1877         } else if (is_data) {
1878                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1879                                              last_ref);
1880         } else {
1881                 *last_ref = 1;
1882                 ret = btrfs_del_item(trans, root, path);
1883         }
1884         return ret;
1885 }
1886
1887 static int btrfs_issue_discard(struct block_device *bdev,
1888                                 u64 start, u64 len)
1889 {
1890         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891 }
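
/*
 * Note: blkdev_issue_discard() takes 512-byte sectors, which is what the
 * >> 9 shifts above convert to, e.g. a 1 MiB start offset becomes sector
 * 2048.
 */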
1892
1893 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894                          u64 num_bytes, u64 *actual_bytes)
1895 {
1896         int ret;
1897         u64 discarded_bytes = 0;
1898         struct btrfs_bio *bbio = NULL;
1899
1901         /* Tell the block device(s) that the sectors can be discarded */
1902         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903                               bytenr, &num_bytes, &bbio, 0);
1904         /* Error condition is -ENOMEM */
1905         if (!ret) {
1906                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1907                 int i;
1908
1910                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911                         if (!stripe->dev->can_discard)
1912                                 continue;
1913
1914                         ret = btrfs_issue_discard(stripe->dev->bdev,
1915                                                   stripe->physical,
1916                                                   stripe->length);
1917                         if (!ret)
1918                                 discarded_bytes += stripe->length;
1919                         else if (ret != -EOPNOTSUPP)
1920                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1921
1922                         /*
1923                          * In case we get back EOPNOTSUPP for some reason,
1924                          * just ignore the return value so we don't screw up
1925                          * people calling discard_extent.
1926                          */
1927                         ret = 0;
1928                 }
1929                 btrfs_put_bbio(bbio);
1930         }
1931
1932         if (actual_bytes)
1933                 *actual_bytes = discarded_bytes;
1934
1936         if (ret == -EOPNOTSUPP)
1937                 ret = 0;
1938         return ret;
1939 }
1940
1941 /* Can return -ENOMEM */
1942 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943                          struct btrfs_root *root,
1944                          u64 bytenr, u64 num_bytes, u64 parent,
1945                          u64 root_objectid, u64 owner, u64 offset,
1946                          int no_quota)
1947 {
1948         int ret;
1949         struct btrfs_fs_info *fs_info = root->fs_info;
1950
1951         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1952                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1953
1954         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1955                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1956                                         num_bytes,
1957                                         parent, root_objectid, (int)owner,
1958                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1959         } else {
1960                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1961                                         num_bytes,
1962                                         parent, root_objectid, owner, offset,
1963                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1964         }
1965         return ret;
1966 }
1967
1968 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1969                                   struct btrfs_root *root,
1970                                   u64 bytenr, u64 num_bytes,
1971                                   u64 parent, u64 root_objectid,
1972                                   u64 owner, u64 offset, int refs_to_add,
1973                                   int no_quota,
1974                                   struct btrfs_delayed_extent_op *extent_op)
1975 {
1976         struct btrfs_fs_info *fs_info = root->fs_info;
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         struct btrfs_key key;
1981         u64 refs;
1982         int ret;
1983         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1984
1985         path = btrfs_alloc_path();
1986         if (!path)
1987                 return -ENOMEM;
1988
1989         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1990                 no_quota = 1;
1991
1992         path->reada = 1;
1993         path->leave_spinning = 1;
1994         /* this will set up the path even if it fails to insert the back ref */
1995         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1996                                            bytenr, num_bytes, parent,
1997                                            root_objectid, owner, offset,
1998                                            refs_to_add, extent_op);
1999         if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2000                 goto out;
2001         /*
2002          * Ok, we were able to insert an inline extent ref and it appears to
2003          * be a new reference, so deal with the qgroup accounting.
2004          */
2005         if (!ret && !no_quota) {
2006                 ASSERT(root->fs_info->quota_enabled);
2007                 leaf = path->nodes[0];
2008                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2009                 item = btrfs_item_ptr(leaf, path->slots[0],
2010                                       struct btrfs_extent_item);
2011                 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2012                         type = BTRFS_QGROUP_OPER_ADD_SHARED;
2013                 btrfs_release_path(path);
2014
2015                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2016                                               bytenr, num_bytes, type, 0);
2017                 goto out;
2018         }
2019
2020         /*
2021          * Ok, we had -EAGAIN which means we didn't have space to insert an
2022          * inline extent ref, so just update the reference count and add a
2023          * normal backref.
2024          */
2025         leaf = path->nodes[0];
2026         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2027         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2028         refs = btrfs_extent_refs(leaf, item);
2029         if (refs)
2030                 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2031         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2032         if (extent_op)
2033                 __run_delayed_extent_op(extent_op, leaf, item);
2034
2035         btrfs_mark_buffer_dirty(leaf);
2036         btrfs_release_path(path);
2037
2038         if (!no_quota) {
2039                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2040                                               bytenr, num_bytes, type, 0);
2041                 if (ret)
2042                         goto out;
2043         }
2044
2045         path->reada = 1;
2046         path->leave_spinning = 1;
2047         /* now insert the actual backref */
2048         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2049                                     path, bytenr, parent, root_objectid,
2050                                     owner, offset, refs_to_add);
2051         if (ret)
2052                 btrfs_abort_transaction(trans, root, ret);
2053 out:
2054         btrfs_free_path(path);
2055         return ret;
2056 }
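
/*
 * Control-flow summary for the function above (sketch):
 *
 *   insert_inline_extent_backref() == 0       ref added (or bumped)
 *                                             inline; record the qgroup
 *                                             operation and we are done
 *   insert_inline_extent_backref() == -EAGAIN no room left in the extent
 *                                             item, so bump the ref count
 *                                             on the item and insert a
 *                                             separate keyed backref
 */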
2057
2058 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2059                                 struct btrfs_root *root,
2060                                 struct btrfs_delayed_ref_node *node,
2061                                 struct btrfs_delayed_extent_op *extent_op,
2062                                 int insert_reserved)
2063 {
2064         int ret = 0;
2065         struct btrfs_delayed_data_ref *ref;
2066         struct btrfs_key ins;
2067         u64 parent = 0;
2068         u64 ref_root = 0;
2069         u64 flags = 0;
2070
2071         ins.objectid = node->bytenr;
2072         ins.offset = node->num_bytes;
2073         ins.type = BTRFS_EXTENT_ITEM_KEY;
2074
2075         ref = btrfs_delayed_node_to_data_ref(node);
2076         trace_run_delayed_data_ref(node, ref, node->action);
2077
2078         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2079                 parent = ref->parent;
2080         ref_root = ref->root;
2081
2082         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2083                 if (extent_op)
2084                         flags |= extent_op->flags_to_set;
2085                 ret = alloc_reserved_file_extent(trans, root,
2086                                                  parent, ref_root, flags,
2087                                                  ref->objectid, ref->offset,
2088                                                  &ins, node->ref_mod);
2089         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2090                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2091                                              node->num_bytes, parent,
2092                                              ref_root, ref->objectid,
2093                                              ref->offset, node->ref_mod,
2094                                              node->no_quota, extent_op);
2095         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2096                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2097                                           node->num_bytes, parent,
2098                                           ref_root, ref->objectid,
2099                                           ref->offset, node->ref_mod,
2100                                           extent_op, node->no_quota);
2101         } else {
2102                 BUG();
2103         }
2104         return ret;
2105 }
2106
2107 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2108                                     struct extent_buffer *leaf,
2109                                     struct btrfs_extent_item *ei)
2110 {
2111         u64 flags = btrfs_extent_flags(leaf, ei);
2112         if (extent_op->update_flags) {
2113                 flags |= extent_op->flags_to_set;
2114                 btrfs_set_extent_flags(leaf, ei, flags);
2115         }
2116
2117         if (extent_op->update_key) {
2118                 struct btrfs_tree_block_info *bi;
2119                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2120                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2121                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2122         }
2123 }
2124
2125 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2126                                  struct btrfs_root *root,
2127                                  struct btrfs_delayed_ref_node *node,
2128                                  struct btrfs_delayed_extent_op *extent_op)
2129 {
2130         struct btrfs_key key;
2131         struct btrfs_path *path;
2132         struct btrfs_extent_item *ei;
2133         struct extent_buffer *leaf;
2134         u32 item_size;
2135         int ret;
2136         int err = 0;
2137         int metadata = !extent_op->is_data;
2138
2139         if (trans->aborted)
2140                 return 0;
2141
2142         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2143                 metadata = 0;
2144
2145         path = btrfs_alloc_path();
2146         if (!path)
2147                 return -ENOMEM;
2148
2149         key.objectid = node->bytenr;
2150
2151         if (metadata) {
2152                 key.type = BTRFS_METADATA_ITEM_KEY;
2153                 key.offset = extent_op->level;
2154         } else {
2155                 key.type = BTRFS_EXTENT_ITEM_KEY;
2156                 key.offset = node->num_bytes;
2157         }
2158
2159 again:
2160         path->reada = 1;
2161         path->leave_spinning = 1;
2162         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2163                                 path, 0, 1);
2164         if (ret < 0) {
2165                 err = ret;
2166                 goto out;
2167         }
2168         if (ret > 0) {
2169                 if (metadata) {
2170                         if (path->slots[0] > 0) {
2171                                 path->slots[0]--;
2172                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2173                                                       path->slots[0]);
2174                                 if (key.objectid == node->bytenr &&
2175                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2176                                     key.offset == node->num_bytes)
2177                                         ret = 0;
2178                         }
2179                         if (ret > 0) {
2180                                 btrfs_release_path(path);
2181                                 metadata = 0;
2182
2183                                 key.objectid = node->bytenr;
2184                                 key.offset = node->num_bytes;
2185                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2186                                 goto again;
2187                         }
2188                 } else {
2189                         err = -EIO;
2190                         goto out;
2191                 }
2192         }
2193
2194         leaf = path->nodes[0];
2195         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2196 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2197         if (item_size < sizeof(*ei)) {
2198                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2199                                              path, (u64)-1, 0);
2200                 if (ret < 0) {
2201                         err = ret;
2202                         goto out;
2203                 }
2204                 leaf = path->nodes[0];
2205                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2206         }
2207 #endif
2208         BUG_ON(item_size < sizeof(*ei));
2209         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2210         __run_delayed_extent_op(extent_op, leaf, ei);
2211
2212         btrfs_mark_buffer_dirty(leaf);
2213 out:
2214         btrfs_free_path(path);
2215         return err;
2216 }
2217
2218 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2219                                 struct btrfs_root *root,
2220                                 struct btrfs_delayed_ref_node *node,
2221                                 struct btrfs_delayed_extent_op *extent_op,
2222                                 int insert_reserved)
2223 {
2224         int ret = 0;
2225         struct btrfs_delayed_tree_ref *ref;
2226         struct btrfs_key ins;
2227         u64 parent = 0;
2228         u64 ref_root = 0;
2229         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2230                                                  SKINNY_METADATA);
2231
2232         ref = btrfs_delayed_node_to_tree_ref(node);
2233         trace_run_delayed_tree_ref(node, ref, node->action);
2234
2235         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2236                 parent = ref->parent;
2237         ref_root = ref->root;
2238
2239         ins.objectid = node->bytenr;
2240         if (skinny_metadata) {
2241                 ins.offset = ref->level;
2242                 ins.type = BTRFS_METADATA_ITEM_KEY;
2243         } else {
2244                 ins.offset = node->num_bytes;
2245                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2246         }
2247
2248         BUG_ON(node->ref_mod != 1);
2249         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2250                 BUG_ON(!extent_op || !extent_op->update_flags);
2251                 ret = alloc_reserved_tree_block(trans, root,
2252                                                 parent, ref_root,
2253                                                 extent_op->flags_to_set,
2254                                                 &extent_op->key,
2255                                                 ref->level, &ins,
2256                                                 node->no_quota);
2257         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2258                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2259                                              node->num_bytes, parent, ref_root,
2260                                              ref->level, 0, 1, node->no_quota,
2261                                              extent_op);
2262         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2263                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2264                                           node->num_bytes, parent, ref_root,
2265                                           ref->level, 0, 1, extent_op,
2266                                           node->no_quota);
2267         } else {
2268                 BUG();
2269         }
2270         return ret;
2271 }
2272
2273 /* helper function to actually process a single delayed ref entry */
2274 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2275                                struct btrfs_root *root,
2276                                struct btrfs_delayed_ref_node *node,
2277                                struct btrfs_delayed_extent_op *extent_op,
2278                                int insert_reserved)
2279 {
2280         int ret = 0;
2281
2282         if (trans->aborted) {
2283                 if (insert_reserved)
2284                         btrfs_pin_extent(root, node->bytenr,
2285                                          node->num_bytes, 1);
2286                 return 0;
2287         }
2288
2289         if (btrfs_delayed_ref_is_head(node)) {
2290                 struct btrfs_delayed_ref_head *head;
2291                 /*
2292                  * we've hit the end of the chain and we were supposed
2293                  * to insert this extent into the tree.  But it got
2294                  * deleted before we ever needed to insert it, so all
2295                  * we have to do is clean up the accounting
2296                  */
2297                 BUG_ON(extent_op);
2298                 head = btrfs_delayed_node_to_head(node);
2299                 trace_run_delayed_ref_head(node, head, node->action);
2300
2301                 if (insert_reserved) {
2302                         btrfs_pin_extent(root, node->bytenr,
2303                                          node->num_bytes, 1);
2304                         if (head->is_data) {
2305                                 ret = btrfs_del_csums(trans, root,
2306                                                       node->bytenr,
2307                                                       node->num_bytes);
2308                         }
2309                 }
2310                 return ret;
2311         }
2312
2313         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2314             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2315                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2316                                            insert_reserved);
2317         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2318                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2319                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2320                                            insert_reserved);
2321         else
2322                 BUG();
2323         return ret;
2324 }
2325
2326 static noinline struct btrfs_delayed_ref_node *
2327 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2328 {
2329         struct rb_node *node;
2330         struct btrfs_delayed_ref_node *ref, *last = NULL;
2331
2332         /*
2333          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2334          * this prevents the ref count from going down to zero when
2335          * there are still pending delayed refs.
2336          */
2337         node = rb_first(&head->ref_root);
2338         while (node) {
2339                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2340                                 rb_node);
2341                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2342                         return ref;
2343                 else if (last == NULL)
2344                         last = ref;
2345                 node = rb_next(node);
2346         }
2347         return last;
2348 }
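
/*
 * Example (illustration): if a head's rb-tree holds [DROP, DROP, ADD],
 * the ADD is returned first even though the drops sort earlier, so the
 * extent's ref count cannot transiently hit zero and free the extent
 * while a matching add is still queued.
 */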
2349
2350 /*
2351  * Returns 0 on success or if called with an already aborted transaction.
2352  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2353  */
2354 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2355                                              struct btrfs_root *root,
2356                                              unsigned long nr)
2357 {
2358         struct btrfs_delayed_ref_root *delayed_refs;
2359         struct btrfs_delayed_ref_node *ref;
2360         struct btrfs_delayed_ref_head *locked_ref = NULL;
2361         struct btrfs_delayed_extent_op *extent_op;
2362         struct btrfs_fs_info *fs_info = root->fs_info;
2363         ktime_t start = ktime_get();
2364         int ret;
2365         unsigned long count = 0;
2366         unsigned long actual_count = 0;
2367         int must_insert_reserved = 0;
2368
2369         delayed_refs = &trans->transaction->delayed_refs;
2370         while (1) {
2371                 if (!locked_ref) {
2372                         if (count >= nr)
2373                                 break;
2374
2375                         spin_lock(&delayed_refs->lock);
2376                         locked_ref = btrfs_select_ref_head(trans);
2377                         if (!locked_ref) {
2378                                 spin_unlock(&delayed_refs->lock);
2379                                 break;
2380                         }
2381
2382                         /* grab the lock that says we are going to process
2383                          * all the refs for this head */
2384                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2385                         spin_unlock(&delayed_refs->lock);
2386                         /*
2387                          * we may have dropped the spin lock to get the head
2388                          * mutex lock, and that might have given someone else
2389                          * time to free the head.  If that's true, it has been
2390                          * removed from our list and we can move on.
2391                          */
2392                         if (ret == -EAGAIN) {
2393                                 locked_ref = NULL;
2394                                 count++;
2395                                 continue;
2396                         }
2397                 }
2398
2399                 /*
2400                  * We need to try and merge add/drops of the same ref since we
2401                  * can run into issues with relocate dropping the implicit ref
2402                  * and then it being added back again before the drop can
2403                  * finish.  If we merged anything we need to re-loop so we can
2404                  * get a good ref.
2405                  */
2406                 spin_lock(&locked_ref->lock);
2407                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2408                                          locked_ref);
2409
2410                 /*
2411                  * locked_ref is the head node, so we have to go one
2412                  * node back for any delayed ref updates
2413                  */
2414                 ref = select_delayed_ref(locked_ref);
2415
2416                 if (ref && ref->seq &&
2417                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2418                         spin_unlock(&locked_ref->lock);
2419                         btrfs_delayed_ref_unlock(locked_ref);
2420                         spin_lock(&delayed_refs->lock);
2421                         locked_ref->processing = 0;
2422                         delayed_refs->num_heads_ready++;
2423                         spin_unlock(&delayed_refs->lock);
2424                         locked_ref = NULL;
2425                         cond_resched();
2426                         count++;
2427                         continue;
2428                 }
2429
2430                 /*
2431                  * record the must_insert_reserved flag before we
2432                  * drop the spin lock.
2433                  */
2434                 must_insert_reserved = locked_ref->must_insert_reserved;
2435                 locked_ref->must_insert_reserved = 0;
2436
2437                 extent_op = locked_ref->extent_op;
2438                 locked_ref->extent_op = NULL;
2439
2440                 if (!ref) {
2443                         /* All delayed refs have been processed, go ahead
2444                          * and send the head node to run_one_delayed_ref,
2445                          * so that any accounting fixes can happen
2446                          */
2447                         ref = &locked_ref->node;
2448
2449                         if (extent_op && must_insert_reserved) {
2450                                 btrfs_free_delayed_extent_op(extent_op);
2451                                 extent_op = NULL;
2452                         }
2453
2454                         if (extent_op) {
2455                                 spin_unlock(&locked_ref->lock);
2456                                 ret = run_delayed_extent_op(trans, root,
2457                                                             ref, extent_op);
2458                                 btrfs_free_delayed_extent_op(extent_op);
2459
2460                                 if (ret) {
2461                                         /*
2462                                          * Need to reset must_insert_reserved if
2463                                          * there was an error so the abort stuff
2464                                          * can cleanup the reserved space
2465                                          * properly.
2466                                          */
2467                                         if (must_insert_reserved)
2468                                                 locked_ref->must_insert_reserved = 1;
2469                                         locked_ref->processing = 0;
2470                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2471                                         btrfs_delayed_ref_unlock(locked_ref);
2472                                         return ret;
2473                                 }
2474                                 continue;
2475                         }
2476
2477                         /*
2478                          * Need to drop our head ref lock and re-acquire the
2479                          * delayed ref lock and then re-check to make sure
2480                          * nobody got added.
2481                          */
2482                         spin_unlock(&locked_ref->lock);
2483                         spin_lock(&delayed_refs->lock);
2484                         spin_lock(&locked_ref->lock);
2485                         if (rb_first(&locked_ref->ref_root) ||
2486                             locked_ref->extent_op) {
2487                                 spin_unlock(&locked_ref->lock);
2488                                 spin_unlock(&delayed_refs->lock);
2489                                 continue;
2490                         }
2491                         ref->in_tree = 0;
2492                         delayed_refs->num_heads--;
2493                         rb_erase(&locked_ref->href_node,
2494                                  &delayed_refs->href_root);
2495                         spin_unlock(&delayed_refs->lock);
2496                 } else {
2497                         actual_count++;
2498                         ref->in_tree = 0;
2499                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2500                 }
2501                 atomic_dec(&delayed_refs->num_entries);
2502
2503                 if (!btrfs_delayed_ref_is_head(ref)) {
2504                         /*
2505                          * when we play the delayed ref, also correct the
2506                          * ref_mod on head
2507                          */
2508                         switch (ref->action) {
2509                         case BTRFS_ADD_DELAYED_REF:
2510                         case BTRFS_ADD_DELAYED_EXTENT:
2511                                 locked_ref->node.ref_mod -= ref->ref_mod;
2512                                 break;
2513                         case BTRFS_DROP_DELAYED_REF:
2514                                 locked_ref->node.ref_mod += ref->ref_mod;
2515                                 break;
2516                         default:
2517                                 WARN_ON(1);
2518                         }
2519                 }
2520                 spin_unlock(&locked_ref->lock);
2521
2522                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2523                                           must_insert_reserved);
2524
2525                 btrfs_free_delayed_extent_op(extent_op);
2526                 if (ret) {
2527                         locked_ref->processing = 0;
2528                         btrfs_delayed_ref_unlock(locked_ref);
2529                         btrfs_put_delayed_ref(ref);
2530                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2531                         return ret;
2532                 }
2533
2534                 /*
2535                  * If this node is a head, that means all the refs in this head
2536                  * have been dealt with, and we will pick the next head to deal
2537                  * with, so we must unlock the head and drop it from the cluster
2538                  * list before we release it.
2539                  */
2540                 if (btrfs_delayed_ref_is_head(ref)) {
2541                         if (locked_ref->is_data &&
2542                             locked_ref->total_ref_mod < 0) {
2543                                 spin_lock(&delayed_refs->lock);
2544                                 delayed_refs->pending_csums -= ref->num_bytes;
2545                                 spin_unlock(&delayed_refs->lock);
2546                         }
2547                         btrfs_delayed_ref_unlock(locked_ref);
2548                         locked_ref = NULL;
2549                 }
2550                 btrfs_put_delayed_ref(ref);
2551                 count++;
2552                 cond_resched();
2553         }
2554
2555         /*
2556          * We don't want to include ref heads since we can have empty ref heads
2557          * and those would drastically skew our runtime down, since for them we
2558          * just do accounting, not actual extent tree updates.
2559          */
2560         if (actual_count > 0) {
2561                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2562                 u64 avg;
2563
2564                 /*
2565                  * We weigh the current average higher than our current runtime
2566                  * to avoid large swings in the average.
2567                  */
2568                 spin_lock(&delayed_refs->lock);
2569                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2570                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2571                 spin_unlock(&delayed_refs->lock);
2572         }
2573         return 0;
2574 }
2575
2576 #ifdef SCRAMBLE_DELAYED_REFS
2577 /*
2578  * Normally delayed refs get processed in ascending bytenr order. This
2579  * correlates in most cases to the order added. To expose dependencies on this
2580  * order, we start to process the tree in the middle instead of the beginning
2581  * order, we start to process the tree in the middle instead of the beginning.
2582 static u64 find_middle(struct rb_root *root)
2583 {
2584         struct rb_node *n = root->rb_node;
2585         struct btrfs_delayed_ref_node *entry;
2586         int alt = 1;
2587         u64 middle;
2588         u64 first = 0, last = 0;
2589
2590         n = rb_first(root);
2591         if (n) {
2592                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2593                 first = entry->bytenr;
2594         }
2595         n = rb_last(root);
2596         if (n) {
2597                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2598                 last = entry->bytenr;
2599         }
2600         n = root->rb_node;
2601
2602         while (n) {
2603                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2604                 WARN_ON(!entry->in_tree);
2605
2606                 middle = entry->bytenr;
2607
2608                 if (alt)
2609                         n = n->rb_left;
2610                 else
2611                         n = n->rb_right;
2612
2613                 alt = 1 - alt;
2614         }
2615         return middle;
2616 }
2617 #endif
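
/*
 * Example walk for find_middle() (illustration): in a balanced tree of
 * bytenrs {1..7} (root 4, children 2 and 6), alternating left/right from
 * the root visits 4 -> 2 -> 3 and returns 3, close to the median, which
 * is exactly what the debug scrambling wants as a starting point.
 */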
2618
2619 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2620 {
2621         u64 num_bytes;
2622
2623         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2624                              sizeof(struct btrfs_extent_inline_ref));
2625         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2626                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2627
2628         /*
2629          * We don't ever fill up leaves all the way, so the caller doubles
2630          * this estimate to be closer to what we're really going to want to use.
2631          */
2632         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2633 }
2634
2635 /*
2636  * Takes the number of bytes to be checksummed and figures out how many leaves it
2637  * would require to store the csums for that many bytes.
2638  */
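     /*
      * Rough worked example (illustrative sizes, not taken from this file):
      * with a 16K nodesize, 4-byte crc32c checksums and 4K sectors, one leaf
      * holds on the order of 4000 checksums, so each leaf covers roughly
      * 16MiB of checksummed data and 1GiB of data needs about 64 leaves.
      */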
2639 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2640 {
2641         u64 csum_size;
2642         u64 num_csums_per_leaf;
2643         u64 num_csums;
2644
2645         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2646         num_csums_per_leaf = div64_u64(csum_size,
2647                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2648         num_csums = div64_u64(csum_bytes, root->sectorsize);
2649         num_csums += num_csums_per_leaf - 1;
2650         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2651         return num_csums;
2652 }
2653
2654 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2655                                        struct btrfs_root *root)
2656 {
2657         struct btrfs_block_rsv *global_rsv;
2658         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2659         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2660         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2661         u64 num_bytes, num_dirty_bgs_bytes;
2662         int ret = 0;
2663
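             /*
              * Sketch of the estimate below: one transaction item's worth of
              * metadata, plus one leaf per ready ref head, doubled because
              * leaves are never completely full, plus reservations for the
              * pending csum deletions and the dirty block group updates.
              */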
2664         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2665         num_heads = heads_to_leaves(root, num_heads);
2666         if (num_heads > 1)
2667                 num_bytes += (num_heads - 1) * root->nodesize;
2668         num_bytes <<= 1;
2669         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2670         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2671                                                              num_dirty_bgs);
2672         global_rsv = &root->fs_info->global_block_rsv;
2673
2674         /*
2675          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2676          * wiggle room since running delayed refs can create more delayed refs.
2677          */
2678         if (global_rsv->space_info->full) {
2679                 num_dirty_bgs_bytes <<= 1;
2680                 num_bytes <<= 1;
2681         }
2682
2683         spin_lock(&global_rsv->lock);
2684         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2685                 ret = 1;
2686         spin_unlock(&global_rsv->lock);
2687         return ret;
2688 }
2689
2690 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2691                                        struct btrfs_root *root)
2692 {
2693         struct btrfs_fs_info *fs_info = root->fs_info;
2694         u64 num_entries =
2695                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2696         u64 avg_runtime;
2697         u64 val;
2698
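             /*
              * Estimate how long running every queued entry would take as
              * (number of entries) * (average cost per entry).  Past a full
              * second of estimated work return 1, past half a second return
              * 2; otherwise fall back to the reservation check below.
              */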
2699         smp_mb();
2700         avg_runtime = fs_info->avg_delayed_ref_runtime;
2701         val = num_entries * avg_runtime;
2702         if (val >= NSEC_PER_SEC)
2703                 return 1;
2704         if (val >= NSEC_PER_SEC / 2)
2705                 return 2;
2706
2707         return btrfs_check_space_for_delayed_refs(trans, root);
2708 }
2709
2710 struct async_delayed_refs {
2711         struct btrfs_root *root;
2712         int count;
2713         int error;
2714         int sync;
2715         struct completion wait;
2716         struct btrfs_work work;
2717 };
2718
2719 static void delayed_ref_async_start(struct btrfs_work *work)
2720 {
2721         struct async_delayed_refs *async;
2722         struct btrfs_trans_handle *trans;
2723         int ret;
2724
2725         async = container_of(work, struct async_delayed_refs, work);
2726
2727         trans = btrfs_join_transaction(async->root);
2728         if (IS_ERR(trans)) {
2729                 async->error = PTR_ERR(trans);
2730                 goto done;
2731         }
2732
2733         /*
2734          * trans->sync means that when we call end_transaction, we won't
2735          * wait on delayed refs
2736          */
2737         trans->sync = true;
2738         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2739         if (ret)
2740                 async->error = ret;
2741
2742         ret = btrfs_end_transaction(trans, async->root);
2743         if (ret && !async->error)
2744                 async->error = ret;
2745 done:
2746         if (async->sync)
2747                 complete(&async->wait);
2748         else
2749                 kfree(async);
2750 }
2751
2752 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2753                                  unsigned long count, int wait)
2754 {
2755         struct async_delayed_refs *async;
2756         int ret;
2757
2758         async = kmalloc(sizeof(*async), GFP_NOFS);
2759         if (!async)
2760                 return -ENOMEM;
2761
2762         async->root = root->fs_info->tree_root;
2763         async->count = count;
2764         async->error = 0;
2765         if (wait)
2766                 async->sync = 1;
2767         else
2768                 async->sync = 0;
2769         init_completion(&async->wait);
2770
2771         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2772                         delayed_ref_async_start, NULL, NULL);
2773
2774         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2775
2776         if (wait) {
2777                 wait_for_completion(&async->wait);
2778                 ret = async->error;
2779                 kfree(async);
2780                 return ret;
2781         }
2782         return 0;
2783 }
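     /*
      * Hypothetical usage sketch (not a call site in this file): a caller
      * that must not block can fire off a batch with wait == 0 and ignore
      * completion, or collect the result synchronously with wait == 1:
      *
      *	err = btrfs_async_run_delayed_refs(root, nr, 0);
      *	if (err)	// only -ENOMEM is possible in the nowait case
      *		return err;
      */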
2784
2785 /*
2786  * this starts processing the delayed reference count updates and
2787  * extent insertions we have queued up so far.  count can be
2788  * 0, which means to process everything in the tree at the start
2789  * of the run (but not newly added entries), or it can be some target
2790  * number you'd like to process.
2791  *
2792  * Returns 0 on success or if called with an aborted transaction
2793  * Returns <0 on error and aborts the transaction
2794  */
2795 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2796                            struct btrfs_root *root, unsigned long count)
2797 {
2798         struct rb_node *node;
2799         struct btrfs_delayed_ref_root *delayed_refs;
2800         struct btrfs_delayed_ref_head *head;
2801         int ret;
2802         int run_all = count == (unsigned long)-1;
2803
2804         /* We'll clean this up in btrfs_cleanup_transaction */
2805         if (trans->aborted)
2806                 return 0;
2807
2808         if (root == root->fs_info->extent_root)
2809                 root = root->fs_info->tree_root;
2810
2811         delayed_refs = &trans->transaction->delayed_refs;
2812         if (count == 0)
2813                 count = atomic_read(&delayed_refs->num_entries) * 2;
2814
2815 again:
2816 #ifdef SCRAMBLE_DELAYED_REFS
2817         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2818 #endif
2819         ret = __btrfs_run_delayed_refs(trans, root, count);
2820         if (ret < 0) {
2821                 btrfs_abort_transaction(trans, root, ret);
2822                 return ret;
2823         }
2824
2825         if (run_all) {
2826                 if (!list_empty(&trans->new_bgs))
2827                         btrfs_create_pending_block_groups(trans, root);
2828
2829                 spin_lock(&delayed_refs->lock);
2830                 node = rb_first(&delayed_refs->href_root);
2831                 if (!node) {
2832                         spin_unlock(&delayed_refs->lock);
2833                         goto out;
2834                 }
2835                 count = (unsigned long)-1;
2836
2837                 while (node) {
2838                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2839                                         href_node);
2840                         if (btrfs_delayed_ref_is_head(&head->node)) {
2841                                 struct btrfs_delayed_ref_node *ref;
2842
2843                                 ref = &head->node;
2844                                 atomic_inc(&ref->refs);
2845
2846                                 spin_unlock(&delayed_refs->lock);
2847                                 /*
2848                                  * Mutex was contended, block until it's
2849                                  * released and try again
2850                                  */
2851                                 mutex_lock(&head->mutex);
2852                                 mutex_unlock(&head->mutex);
2853
2854                                 btrfs_put_delayed_ref(ref);
2855                                 cond_resched();
2856                                 goto again;
2857                         } else {
2858                                 WARN_ON(1);
2859                         }
2860                         node = rb_next(node);
2861                 }
2862                 spin_unlock(&delayed_refs->lock);
2863                 cond_resched();
2864                 goto again;
2865         }
2866 out:
2867         ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2868         if (ret)
2869                 return ret;
2870         assert_qgroups_uptodate(trans);
2871         return 0;
2872 }
2873
2874 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2875                                 struct btrfs_root *root,
2876                                 u64 bytenr, u64 num_bytes, u64 flags,
2877                                 int level, int is_data)
2878 {
2879         struct btrfs_delayed_extent_op *extent_op;
2880         int ret;
2881
2882         extent_op = btrfs_alloc_delayed_extent_op();
2883         if (!extent_op)
2884                 return -ENOMEM;
2885
2886         extent_op->flags_to_set = flags;
2887         extent_op->update_flags = 1;
2888         extent_op->update_key = 0;
2889         extent_op->is_data = is_data ? 1 : 0;
2890         extent_op->level = level;
2891
2892         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2893                                           num_bytes, extent_op);
2894         if (ret)
2895                 btrfs_free_delayed_extent_op(extent_op);
2896         return ret;
2897 }
2898
2899 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2900                                       struct btrfs_root *root,
2901                                       struct btrfs_path *path,
2902                                       u64 objectid, u64 offset, u64 bytenr)
2903 {
2904         struct btrfs_delayed_ref_head *head;
2905         struct btrfs_delayed_ref_node *ref;
2906         struct btrfs_delayed_data_ref *data_ref;
2907         struct btrfs_delayed_ref_root *delayed_refs;
2908         struct rb_node *node;
2909         int ret = 0;
2910
2911         delayed_refs = &trans->transaction->delayed_refs;
2912         spin_lock(&delayed_refs->lock);
2913         head = btrfs_find_delayed_ref_head(trans, bytenr);
2914         if (!head) {
2915                 spin_unlock(&delayed_refs->lock);
2916                 return 0;
2917         }
2918
2919         if (!mutex_trylock(&head->mutex)) {
2920                 atomic_inc(&head->node.refs);
2921                 spin_unlock(&delayed_refs->lock);
2922
2923                 btrfs_release_path(path);
2924
2925                 /*
2926                  * Mutex was contended, block until it's released and let
2927                  * caller try again
2928                  */
2929                 mutex_lock(&head->mutex);
2930                 mutex_unlock(&head->mutex);
2931                 btrfs_put_delayed_ref(&head->node);
2932                 return -EAGAIN;
2933         }
2934         spin_unlock(&delayed_refs->lock);
2935
2936         spin_lock(&head->lock);
2937         node = rb_first(&head->ref_root);
2938         while (node) {
2939                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2940                 node = rb_next(node);
2941
2942                 /* If it's a shared ref we know a cross reference exists */
2943                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2944                         ret = 1;
2945                         break;
2946                 }
2947
2948                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2949
2950                 /*
2951                  * If our ref doesn't match the one we're currently looking at
2952                  * then we have a cross reference.
2953                  */
2954                 if (data_ref->root != root->root_key.objectid ||
2955                     data_ref->objectid != objectid ||
2956                     data_ref->offset != offset) {
2957                         ret = 1;
2958                         break;
2959                 }
2960         }
2961         spin_unlock(&head->lock);
2962         mutex_unlock(&head->mutex);
2963         return ret;
2964 }
2965
2966 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2967                                         struct btrfs_root *root,
2968                                         struct btrfs_path *path,
2969                                         u64 objectid, u64 offset, u64 bytenr)
2970 {
2971         struct btrfs_root *extent_root = root->fs_info->extent_root;
2972         struct extent_buffer *leaf;
2973         struct btrfs_extent_data_ref *ref;
2974         struct btrfs_extent_inline_ref *iref;
2975         struct btrfs_extent_item *ei;
2976         struct btrfs_key key;
2977         u32 item_size;
2978         int ret;
2979
2980         key.objectid = bytenr;
2981         key.offset = (u64)-1;
2982         key.type = BTRFS_EXTENT_ITEM_KEY;
2983
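             /*
              * Search with the largest possible offset so the slot lands
              * just past the last item for this bytenr; the code below then
              * steps back one slot to find the extent item, if one exists.
              */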
2984         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2985         if (ret < 0)
2986                 goto out;
2987         BUG_ON(ret == 0); /* Corruption */
2988
2989         ret = -ENOENT;
2990         if (path->slots[0] == 0)
2991                 goto out;
2992
2993         path->slots[0]--;
2994         leaf = path->nodes[0];
2995         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2996
2997         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2998                 goto out;
2999
3000         ret = 1;
3001         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3002 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3003         if (item_size < sizeof(*ei)) {
3004                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3005                 goto out;
3006         }
3007 #endif
3008         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3009
3010         if (item_size != sizeof(*ei) +
3011             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3012                 goto out;
3013
3014         if (btrfs_extent_generation(leaf, ei) <=
3015             btrfs_root_last_snapshot(&root->root_item))
3016                 goto out;
3017
3018         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3019         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3020             BTRFS_EXTENT_DATA_REF_KEY)
3021                 goto out;
3022
3023         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3024         if (btrfs_extent_refs(leaf, ei) !=
3025             btrfs_extent_data_ref_count(leaf, ref) ||
3026             btrfs_extent_data_ref_root(leaf, ref) !=
3027             root->root_key.objectid ||
3028             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3029             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3030                 goto out;
3031
3032         ret = 0;
3033 out:
3034         return ret;
3035 }
3036
3037 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3038                           struct btrfs_root *root,
3039                           u64 objectid, u64 offset, u64 bytenr)
3040 {
3041         struct btrfs_path *path;
3042         int ret;
3043         int ret2;
3044
3045         path = btrfs_alloc_path();
3046         if (!path)
3047                 return -ENOMEM;
3048
3049         do {
3050                 ret = check_committed_ref(trans, root, path, objectid,
3051                                           offset, bytenr);
3052                 if (ret && ret != -ENOENT)
3053                         goto out;
3054
3055                 ret2 = check_delayed_ref(trans, root, path, objectid,
3056                                          offset, bytenr);
3057         } while (ret2 == -EAGAIN);
3058
3059         if (ret2 && ret2 != -ENOENT) {
3060                 ret = ret2;
3061                 goto out;
3062         }
3063
3064         if (ret != -ENOENT || ret2 != -ENOENT)
3065                 ret = 0;
3066 out:
3067         btrfs_free_path(path);
3068         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3069                 WARN_ON(ret > 0);
3070         return ret;
3071 }
3072
3073 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3074                            struct btrfs_root *root,
3075                            struct extent_buffer *buf,
3076                            int full_backref, int inc)
3077 {
3078         u64 bytenr;
3079         u64 num_bytes;
3080         u64 parent;
3081         u64 ref_root;
3082         u32 nritems;
3083         struct btrfs_key key;
3084         struct btrfs_file_extent_item *fi;
3085         int i;
3086         int level;
3087         int ret = 0;
3088         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3089                             u64, u64, u64, u64, u64, u64, int);
3090
3091
3092         if (btrfs_test_is_dummy_root(root))
3093                 return 0;
3094
3095         ref_root = btrfs_header_owner(buf);
3096         nritems = btrfs_header_nritems(buf);
3097         level = btrfs_header_level(buf);
3098
3099         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3100                 return 0;
3101
3102         if (inc)
3103                 process_func = btrfs_inc_extent_ref;
3104         else
3105                 process_func = btrfs_free_extent;
3106
3107         if (full_backref)
3108                 parent = buf->start;
3109         else
3110                 parent = 0;
3111
3112         for (i = 0; i < nritems; i++) {
3113                 if (level == 0) {
3114                         btrfs_item_key_to_cpu(buf, &key, i);
3115                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3116                                 continue;
3117                         fi = btrfs_item_ptr(buf, i,
3118                                             struct btrfs_file_extent_item);
3119                         if (btrfs_file_extent_type(buf, fi) ==
3120                             BTRFS_FILE_EXTENT_INLINE)
3121                                 continue;
3122                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3123                         if (bytenr == 0)
3124                                 continue;
3125
3126                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3127                         key.offset -= btrfs_file_extent_offset(buf, fi);
3128                         ret = process_func(trans, root, bytenr, num_bytes,
3129                                            parent, ref_root, key.objectid,
3130                                            key.offset, 1);
3131                         if (ret)
3132                                 goto fail;
3133                 } else {
3134                         bytenr = btrfs_node_blockptr(buf, i);
3135                         num_bytes = root->nodesize;
3136                         ret = process_func(trans, root, bytenr, num_bytes,
3137                                            parent, ref_root, level - 1, 0,
3138                                            1);
3139                         if (ret)
3140                                 goto fail;
3141                 }
3142         }
3143         return 0;
3144 fail:
3145         return ret;
3146 }
3147
3148 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3149                   struct extent_buffer *buf, int full_backref)
3150 {
3151         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3152 }
3153
3154 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3155                   struct extent_buffer *buf, int full_backref)
3156 {
3157         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3158 }
3159
3160 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3161                                  struct btrfs_root *root,
3162                                  struct btrfs_path *path,
3163                                  struct btrfs_block_group_cache *cache)
3164 {
3165         int ret;
3166         struct btrfs_root *extent_root = root->fs_info->extent_root;
3167         unsigned long bi;
3168         struct extent_buffer *leaf;
3169
3170         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3171         if (ret) {
3172                 if (ret > 0)
3173                         ret = -ENOENT;
3174                 goto fail;
3175         }
3176
3177         leaf = path->nodes[0];
3178         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3179         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3180         btrfs_mark_buffer_dirty(leaf);
3181         btrfs_release_path(path);
3182 fail:
3183         if (ret)
3184                 btrfs_abort_transaction(trans, root, ret);
3185         return ret;
3186
3187 }
3188
3189 static struct btrfs_block_group_cache *
3190 next_block_group(struct btrfs_root *root,
3191                  struct btrfs_block_group_cache *cache)
3192 {
3193         struct rb_node *node;
3194
3195         spin_lock(&root->fs_info->block_group_cache_lock);
3196
3197         /* If our block group was removed, we need a full search. */
3198         if (RB_EMPTY_NODE(&cache->cache_node)) {
3199                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3200
3201                 spin_unlock(&root->fs_info->block_group_cache_lock);
3202                 btrfs_put_block_group(cache);
3203                 cache = btrfs_lookup_first_block_group(root->fs_info,
3204                                                        next_bytenr);
3205                 return cache;
3206         }
3207         node = rb_next(&cache->cache_node);
3208         btrfs_put_block_group(cache);
3209         if (node) {
3210                 cache = rb_entry(node, struct btrfs_block_group_cache,
3211                                  cache_node);
3212                 btrfs_get_block_group(cache);
3213         } else
3214                 cache = NULL;
3215         spin_unlock(&root->fs_info->block_group_cache_lock);
3216         return cache;
3217 }
3218
3219 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3220                             struct btrfs_trans_handle *trans,
3221                             struct btrfs_path *path)
3222 {
3223         struct btrfs_root *root = block_group->fs_info->tree_root;
3224         struct inode *inode = NULL;
3225         u64 alloc_hint = 0;
3226         int dcs = BTRFS_DC_ERROR;
3227         u64 num_pages = 0;
3228         int retries = 0;
3229         int ret = 0;
3230
3231         /*
3232          * If this block group is smaller than 100MiB, don't bother caching
3233          * it.
3234          */
3235         if (block_group->key.offset < (100 * 1024 * 1024)) {
3236                 spin_lock(&block_group->lock);
3237                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3238                 spin_unlock(&block_group->lock);
3239                 return 0;
3240         }
3241
3242         if (trans->aborted)
3243                 return 0;
3244 again:
3245         inode = lookup_free_space_inode(root, block_group, path);
3246         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3247                 ret = PTR_ERR(inode);
3248                 btrfs_release_path(path);
3249                 goto out;
3250         }
3251
3252         if (IS_ERR(inode)) {
3253                 BUG_ON(retries);
3254                 retries++;
3255
3256                 if (block_group->ro)
3257                         goto out_free;
3258
3259                 ret = create_free_space_inode(root, trans, block_group, path);
3260                 if (ret)
3261                         goto out_free;
3262                 goto again;
3263         }
3264
3265         /* We've already set up this transaction, go ahead and exit */
3266         if (block_group->cache_generation == trans->transid &&
3267             i_size_read(inode)) {
3268                 dcs = BTRFS_DC_SETUP;
3269                 goto out_put;
3270         }
3271
3272         /*
3273          * We want to set the generation to 0, that way if anything goes wrong
3274          * from here on out we know not to trust this cache when we load up next
3275          * time.
3276          */
3277         BTRFS_I(inode)->generation = 0;
3278         ret = btrfs_update_inode(trans, root, inode);
3279         if (ret) {
3280                 /*
3281                  * Theoretically we could recover from this by setting the
3282                  * super cache generation to 0 so we know to invalidate the
3283                  * cache, but then we'd have to keep track of the block
3284                  * groups that fail this way so we know we _have_ to reset
3285                  * this cache before the next commit or risk reading a stale
3286                  * cache.  To limit our exposure to horrible edge cases,
3287                  * let's just abort the transaction; this only happens in
3288                  * really bad situations anyway.
3289                  */
3290                 btrfs_abort_transaction(trans, root, ret);
3291                 goto out_put;
3292         }
3293         WARN_ON(ret);
3294
3295         if (i_size_read(inode) > 0) {
3296                 ret = btrfs_check_trunc_cache_free_space(root,
3297                                         &root->fs_info->global_block_rsv);
3298                 if (ret)
3299                         goto out_put;
3300
3301                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3302                 if (ret)
3303                         goto out_put;
3304         }
3305
3306         spin_lock(&block_group->lock);
3307         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3308             !btrfs_test_opt(root, SPACE_CACHE) ||
3309             block_group->delalloc_bytes) {
3310                 /*
3311                  * don't bother trying to write stuff out _if_ a) we're not
3312                  * cached, b) we're mounted with the nospace_cache option, or
3313                  * c) the block group still has delalloc bytes in flight.
3314                  */
3315                 dcs = BTRFS_DC_WRITTEN;
3316                 spin_unlock(&block_group->lock);
3317                 goto out_put;
3318         }
3319         spin_unlock(&block_group->lock);
3320
3321         /*
3322          * Try to preallocate enough space based on how big the block group is.
3323          * Keep in mind this has to include any pinned space which could end up
3324          * taking up quite a bit since it's not folded into the other space
3325          * cache.
3326          */
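             /*
              * Worked example (illustrative): a 1GiB block group gives
              * div_u64(1GiB, 256MiB) = 4, times 16 is 64 pages, i.e. 256KiB
              * of preallocated cache space with 4K pages.
              */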
3327         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3328         if (!num_pages)
3329                 num_pages = 1;
3330
3331         num_pages *= 16;
3332         num_pages *= PAGE_CACHE_SIZE;
3333
3334         ret = btrfs_check_data_free_space(inode, num_pages);
3335         if (ret)
3336                 goto out_put;
3337
3338         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3339                                               num_pages, num_pages,
3340                                               &alloc_hint);
3341         if (!ret)
3342                 dcs = BTRFS_DC_SETUP;
3343         btrfs_free_reserved_data_space(inode, num_pages);
3344
3345 out_put:
3346         iput(inode);
3347 out_free:
3348         btrfs_release_path(path);
3349 out:
3350         spin_lock(&block_group->lock);
3351         if (!ret && dcs == BTRFS_DC_SETUP)
3352                 block_group->cache_generation = trans->transid;
3353         block_group->disk_cache_state = dcs;
3354         spin_unlock(&block_group->lock);
3355
3356         return ret;
3357 }
3358
3359 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3360                             struct btrfs_root *root)
3361 {
3362         struct btrfs_block_group_cache *cache, *tmp;
3363         struct btrfs_transaction *cur_trans = trans->transaction;
3364         struct btrfs_path *path;
3365
3366         if (list_empty(&cur_trans->dirty_bgs) ||
3367             !btrfs_test_opt(root, SPACE_CACHE))
3368                 return 0;
3369
3370         path = btrfs_alloc_path();
3371         if (!path)
3372                 return -ENOMEM;
3373
3374         /* Could add new block groups, use _safe just in case */
3375         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3376                                  dirty_list) {
3377                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3378                         cache_save_setup(cache, trans, path);
3379         }
3380
3381         btrfs_free_path(path);
3382         return 0;
3383 }
3384
3385 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3386                                    struct btrfs_root *root)
3387 {
3388         struct btrfs_block_group_cache *cache;
3389         struct btrfs_transaction *cur_trans = trans->transaction;
3390         int ret = 0;
3391         struct btrfs_path *path;
3392
3393         if (list_empty(&cur_trans->dirty_bgs))
3394                 return 0;
3395
3396         path = btrfs_alloc_path();
3397         if (!path)
3398                 return -ENOMEM;
3399
3400         /*
3401          * We don't need the lock here since we are protected by the transaction
3402          * commit.  We want to do the cache_save_setup first and then run the
3403          * delayed refs to make sure we have the best chance at doing this all
3404          * in one shot.
3405          */
3406         while (!list_empty(&cur_trans->dirty_bgs)) {
3407                 cache = list_first_entry(&cur_trans->dirty_bgs,
3408                                          struct btrfs_block_group_cache,
3409                                          dirty_list);
3410                 list_del_init(&cache->dirty_list);
3411                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3412                         cache_save_setup(cache, trans, path);
3413                 if (!ret)
3414                         ret = btrfs_run_delayed_refs(trans, root,
3415                                                      (unsigned long) -1);
3416                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
3417                         btrfs_write_out_cache(root, trans, cache, path);
3418                 if (!ret)
3419                         ret = write_one_cache_group(trans, root, path, cache);
3420                 btrfs_put_block_group(cache);
3421         }
3422
3423         btrfs_free_path(path);
3424         return ret;
3425 }
3426
3427 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3428 {
3429         struct btrfs_block_group_cache *block_group;
3430         int readonly = 0;
3431
3432         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3433         if (!block_group || block_group->ro)
3434                 readonly = 1;
3435         if (block_group)
3436                 btrfs_put_block_group(block_group);
3437         return readonly;
3438 }
3439
3440 static const char *alloc_name(u64 flags)
3441 {
3442         switch (flags) {
3443         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3444                 return "mixed";
3445         case BTRFS_BLOCK_GROUP_METADATA:
3446                 return "metadata";
3447         case BTRFS_BLOCK_GROUP_DATA:
3448                 return "data";
3449         case BTRFS_BLOCK_GROUP_SYSTEM:
3450                 return "system";
3451         default:
3452                 WARN_ON(1);
3453                 return "invalid-combination";
3454         }
3455 }
3456
3457 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3458                              u64 total_bytes, u64 bytes_used,
3459                              struct btrfs_space_info **space_info)
3460 {
3461         struct btrfs_space_info *found;
3462         int i;
3463         int factor;
3464         int ret;
3465
3466         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3467                      BTRFS_BLOCK_GROUP_RAID10))
3468                 factor = 2;
3469         else
3470                 factor = 1;
3471
3472         found = __find_space_info(info, flags);
3473         if (found) {
3474                 spin_lock(&found->lock);
3475                 found->total_bytes += total_bytes;
3476                 found->disk_total += total_bytes * factor;
3477                 found->bytes_used += bytes_used;
3478                 found->disk_used += bytes_used * factor;
3479                 found->full = 0;
3480                 spin_unlock(&found->lock);
3481                 *space_info = found;
3482                 return 0;
3483         }
3484         found = kzalloc(sizeof(*found), GFP_NOFS);
3485         if (!found)
3486                 return -ENOMEM;
3487
3488         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3489         if (ret) {
3490                 kfree(found);
3491                 return ret;
3492         }
3493
3494         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3495                 INIT_LIST_HEAD(&found->block_groups[i]);
3496         init_rwsem(&found->groups_sem);
3497         spin_lock_init(&found->lock);
3498         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3499         found->total_bytes = total_bytes;
3500         found->disk_total = total_bytes * factor;
3501         found->bytes_used = bytes_used;
3502         found->disk_used = bytes_used * factor;
3503         found->bytes_pinned = 0;
3504         found->bytes_reserved = 0;
3505         found->bytes_readonly = 0;
3506         found->bytes_may_use = 0;
3507         found->full = 0;
3508         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3509         found->chunk_alloc = 0;
3510         found->flush = 0;
3511         init_waitqueue_head(&found->wait);
3512         INIT_LIST_HEAD(&found->ro_bgs);
3513
3514         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3515                                     info->space_info_kobj, "%s",
3516                                     alloc_name(found->flags));
3517         if (ret) {
                     percpu_counter_destroy(&found->total_bytes_pinned);
3518                 kfree(found);
3519                 return ret;
3520         }
3521
3522         *space_info = found;
3523         list_add_rcu(&found->list, &info->space_info);
3524         if (flags & BTRFS_BLOCK_GROUP_DATA)
3525                 info->data_sinfo = found;
3526
3527         return ret;
3528 }
3529
3530 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3531 {
3532         u64 extra_flags = chunk_to_extended(flags) &
3533                                 BTRFS_EXTENDED_PROFILE_MASK;
3534
3535         write_seqlock(&fs_info->profiles_lock);
3536         if (flags & BTRFS_BLOCK_GROUP_DATA)
3537                 fs_info->avail_data_alloc_bits |= extra_flags;
3538         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3539                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3540         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3541                 fs_info->avail_system_alloc_bits |= extra_flags;
3542         write_sequnlock(&fs_info->profiles_lock);
3543 }
3544
3545 /*
3546  * returns target flags in extended format or 0 if restripe for this
3547  * chunk_type is not in progress
3548  *
3549  * should be called with either volume_mutex or balance_lock held
3550  */
3551 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3552 {
3553         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3554         u64 target = 0;
3555
3556         if (!bctl)
3557                 return 0;
3558
3559         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3560             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3561                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3562         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3563                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3564                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3565         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3566                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3567                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3568         }
3569
3570         return target;
3571 }
3572
3573 /*
3574  * @flags: available profiles in extended format (see ctree.h)
3575  *
3576  * Returns reduced profile in chunk format.  If profile changing is in
3577  * progress (either running or paused) picks the target profile (if it's
3578  * already available), otherwise falls back to plain reducing.
3579  */
3580 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3581 {
3582         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3583         u64 target;
3584         u64 tmp;
3585
3586         /*
3587          * See if a restripe for this chunk_type is in progress; if so,
3588          * try to reduce to the target profile.
3589          */
3590         spin_lock(&root->fs_info->balance_lock);
3591         target = get_restripe_target(root->fs_info, flags);
3592         if (target) {
3593                 /* pick target profile only if it's already available */
3594                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3595                         spin_unlock(&root->fs_info->balance_lock);
3596                         return extended_to_chunk(target);
3597                 }
3598         }
3599         spin_unlock(&root->fs_info->balance_lock);
3600
3601         /* First, mask out the RAID levels which aren't possible */
3602         if (num_devices == 1)
3603                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3604                            BTRFS_BLOCK_GROUP_RAID5);
3605         if (num_devices < 3)
3606                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3607         if (num_devices < 4)
3608                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3609
3610         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3611                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3612                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3613         flags &= ~tmp;
3614
3615         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3616                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3617         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3618                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3619         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3620                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3621         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3622                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3623         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3624                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3625
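             /*
              * Illustrative example of the reduction: on a two-device fs,
              * flags of DATA|RAID1|RAID0 keep both RAID candidates after the
              * device mask, and RAID1 outranks RAID0 in the cascade above,
              * so we return DATA|RAID1 in chunk format.
              */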
3626         return extended_to_chunk(flags | tmp);
3627 }
3628
3629 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3630 {
3631         unsigned seq;
3632         u64 flags;
3633
3634         do {
3635                 flags = orig_flags;
3636                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3637
3638                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3639                         flags |= root->fs_info->avail_data_alloc_bits;
3640                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3641                         flags |= root->fs_info->avail_system_alloc_bits;
3642                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3643                         flags |= root->fs_info->avail_metadata_alloc_bits;
3644         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3645
3646         return btrfs_reduce_alloc_profile(root, flags);
3647 }
3648
3649 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3650 {
3651         u64 flags;
3652         u64 ret;
3653
3654         if (data)
3655                 flags = BTRFS_BLOCK_GROUP_DATA;
3656         else if (root == root->fs_info->chunk_root)
3657                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3658         else
3659                 flags = BTRFS_BLOCK_GROUP_METADATA;
3660
3661         ret = get_alloc_profile(root, flags);
3662         return ret;
3663 }
3664
3665 /*
3666  * This will check the space that the inode allocates from to make sure we have
3667  * enough space for bytes.
3668  */
3669 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3670 {
3671         struct btrfs_space_info *data_sinfo;
3672         struct btrfs_root *root = BTRFS_I(inode)->root;
3673         struct btrfs_fs_info *fs_info = root->fs_info;
3674         u64 used;
3675         int ret = 0, committed = 0;
3676
3677         /* make sure bytes are sectorsize aligned */
3678         bytes = ALIGN(bytes, root->sectorsize);
3679
3680         if (btrfs_is_free_space_inode(inode)) {
3681                 committed = 1;
3682                 ASSERT(current->journal_info);
3683         }
3684
3685         data_sinfo = fs_info->data_sinfo;
3686         if (!data_sinfo)
3687                 goto alloc;
3688
3689 again:
3690         /* make sure we have enough space to handle the data first */
3691         spin_lock(&data_sinfo->lock);
3692         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3693                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3694                 data_sinfo->bytes_may_use;
3695
3696         if (used + bytes > data_sinfo->total_bytes) {
3697                 struct btrfs_trans_handle *trans;
3698
3699                 /*
3700                  * if we don't have enough free bytes in this space then we need
3701                  * to alloc a new chunk.
3702                  */
3703                 if (!data_sinfo->full) {
3704                         u64 alloc_target;
3705
3706                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3707                         spin_unlock(&data_sinfo->lock);
3708 alloc:
3709                         alloc_target = btrfs_get_alloc_profile(root, 1);
3710                         /*
3711                          * It is ugly that we don't call a nolock join
3712                          * transaction for the free space inode case here,
3713                          * but it is safe: we only do the data space
3714                          * reservation for the free space cache in the
3715                          * transaction context, and the common join just
3716                          * increases the use count of the current
3717                          * transaction handle; it doesn't try to acquire
3718                          * the fs-wide trans_lock.
3719                          */
3720                         trans = btrfs_join_transaction(root);
3721                         if (IS_ERR(trans))
3722                                 return PTR_ERR(trans);
3723
3724                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3725                                              alloc_target,
3726                                              CHUNK_ALLOC_NO_FORCE);
3727                         btrfs_end_transaction(trans, root);
3728                         if (ret < 0) {
3729                                 if (ret != -ENOSPC)
3730                                         return ret;
3731                                 else
3732                                         goto commit_trans;
3733                         }
3734
3735                         if (!data_sinfo)
3736                                 data_sinfo = fs_info->data_sinfo;
3737
3738                         goto again;
3739                 }
3740
3741                 /*
3742                  * If we don't have enough pinned space to deal with this
3743                  * allocation don't bother committing the transaction.
3744                  */
3745                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3746                                            bytes) < 0)
3747                         committed = 1;
3748                 spin_unlock(&data_sinfo->lock);
3749
3750                 /* commit the current transaction and try again */
3751 commit_trans:
3752                 if (!committed &&
3753                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3754                         committed = 1;
3755
3756                         trans = btrfs_join_transaction(root);
3757                         if (IS_ERR(trans))
3758                                 return PTR_ERR(trans);
3759                         ret = btrfs_commit_transaction(trans, root);
3760                         if (ret)
3761                                 return ret;
3762                         goto again;
3763                 }
3764
3765                 trace_btrfs_space_reservation(root->fs_info,
3766                                               "space_info:enospc",
3767                                               data_sinfo->flags, bytes, 1);
3768                 return -ENOSPC;
3769         }
3770         data_sinfo->bytes_may_use += bytes;
3771         trace_btrfs_space_reservation(root->fs_info, "space_info",
3772                                       data_sinfo->flags, bytes, 1);
3773         spin_unlock(&data_sinfo->lock);
3774
3775         return 0;
3776 }
3777
3778 /*
3779  * Called if we need to clear a data reservation for this inode.
3780  */
3781 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3782 {
3783         struct btrfs_root *root = BTRFS_I(inode)->root;
3784         struct btrfs_space_info *data_sinfo;
3785
3786         /* make sure bytes are sectorsize aligned */
3787         bytes = ALIGN(bytes, root->sectorsize);
3788
3789         data_sinfo = root->fs_info->data_sinfo;
3790         spin_lock(&data_sinfo->lock);
3791         WARN_ON(data_sinfo->bytes_may_use < bytes);
3792         data_sinfo->bytes_may_use -= bytes;
3793         trace_btrfs_space_reservation(root->fs_info, "space_info",
3794                                       data_sinfo->flags, bytes, 0);
3795         spin_unlock(&data_sinfo->lock);
3796 }
3797
3798 static void force_metadata_allocation(struct btrfs_fs_info *info)
3799 {
3800         struct list_head *head = &info->space_info;
3801         struct btrfs_space_info *found;
3802
3803         rcu_read_lock();
3804         list_for_each_entry_rcu(found, head, list) {
3805                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3806                         found->force_alloc = CHUNK_ALLOC_FORCE;
3807         }
3808         rcu_read_unlock();
3809 }
3810
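     /*
      * Callers treat twice the global reserve's current size as space that
      * is already spoken for; the doubling gives headroom so the global
      * reserve stays usable for critical metadata operations.
      */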
3811 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3812 {
3813         return (global->size << 1);
3814 }
3815
3816 static int should_alloc_chunk(struct btrfs_root *root,
3817                               struct btrfs_space_info *sinfo, int force)
3818 {
3819         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3820         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3821         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3822         u64 thresh;
3823
3824         if (force == CHUNK_ALLOC_FORCE)
3825                 return 1;
3826
3827         /*
3828          * We need to take into account the global rsv because for all intents
3829          * and purposes it's used space.  Don't worry about locking the
3830          * global_rsv, it doesn't change except when the transaction commits.
3831          */
3832         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3833                 num_allocated += calc_global_rsv_need_space(global_rsv);
3834
3835         /*
3836          * in limited mode, we want to have some free space up to
3837          * about 1% of the FS size.
3838          */
3839         if (force == CHUNK_ALLOC_LIMITED) {
3840                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3841                 thresh = max_t(u64, 64 * 1024 * 1024,
3842                                div_factor_fine(thresh, 1));
3843
3844                 if (num_bytes - num_allocated < thresh)
3845                         return 1;
3846         }
3847
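             /*
              * Otherwise only allocate once we are, within a 2MiB slack,
              * past 80% (div_factor(num_bytes, 8)) of the writable space.
              */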
3848         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3849                 return 0;
3850         return 1;
3851 }
3852
3853 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3854 {
3855         u64 num_dev;
3856
3857         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3858                     BTRFS_BLOCK_GROUP_RAID0 |
3859                     BTRFS_BLOCK_GROUP_RAID5 |
3860                     BTRFS_BLOCK_GROUP_RAID6))
3861                 num_dev = root->fs_info->fs_devices->rw_devices;
3862         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3863                 num_dev = 2;
3864         else
3865                 num_dev = 1;    /* DUP or single */
3866
3867         /* metadata for updating the device and chunk trees */
3868         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3869 }
3870
3871 static void check_system_chunk(struct btrfs_trans_handle *trans,
3872                                struct btrfs_root *root, u64 type)
3873 {
3874         struct btrfs_space_info *info;
3875         u64 left;
3876         u64 thresh;
3877
3878         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3879         spin_lock(&info->lock);
3880         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3881                 info->bytes_reserved - info->bytes_readonly;
3882         spin_unlock(&info->lock);
3883
3884         thresh = get_system_chunk_thresh(root, type);
3885         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3886                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3887                         left, thresh, type);
3888                 dump_space_info(info, 0, 0);
3889         }
3890
3891         if (left < thresh) {
3892                 u64 flags;
3893
3894                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3895                 btrfs_alloc_chunk(trans, root, flags);
3896         }
3897 }
3898
3899 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3900                           struct btrfs_root *extent_root, u64 flags, int force)
3901 {
3902         struct btrfs_space_info *space_info;
3903         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3904         int wait_for_alloc = 0;
3905         int ret = 0;
3906
3907         /* Don't re-enter if we're already allocating a chunk */
3908         if (trans->allocating_chunk)
3909                 return -ENOSPC;
3910
3911         space_info = __find_space_info(extent_root->fs_info, flags);
3912         if (!space_info) {
3913                 ret = update_space_info(extent_root->fs_info, flags,
3914                                         0, 0, &space_info);
3915                 BUG_ON(ret); /* -ENOMEM */
3916         }
3917         BUG_ON(!space_info); /* Logic error */
3918
3919 again:
3920         spin_lock(&space_info->lock);
3921         if (force < space_info->force_alloc)
3922                 force = space_info->force_alloc;
3923         if (space_info->full) {
3924                 if (should_alloc_chunk(extent_root, space_info, force))
3925                         ret = -ENOSPC;
3926                 else
3927                         ret = 0;
3928                 spin_unlock(&space_info->lock);
3929                 return ret;
3930         }
3931
3932         if (!should_alloc_chunk(extent_root, space_info, force)) {
3933                 spin_unlock(&space_info->lock);
3934                 return 0;
3935         } else if (space_info->chunk_alloc) {
3936                 wait_for_alloc = 1;
3937         } else {
3938                 space_info->chunk_alloc = 1;
3939         }
3940
3941         spin_unlock(&space_info->lock);
3942
3943         mutex_lock(&fs_info->chunk_mutex);
3944
3945         /*
3946          * The chunk_mutex is held throughout the entirety of a chunk
3947          * allocation, so once we've acquired the chunk_mutex we know that the
3948          * other guy is done and we need to recheck and see if we should
3949          * allocate.
3950          */
3951         if (wait_for_alloc) {
3952                 mutex_unlock(&fs_info->chunk_mutex);
3953                 wait_for_alloc = 0;
3954                 goto again;
3955         }
3956
3957         trans->allocating_chunk = true;
3958
3959         /*
3960          * If we have mixed data/metadata chunks we want to make sure we keep
3961          * allocating mixed chunks instead of individual chunks.
3962          */
3963         if (btrfs_mixed_space_info(space_info))
3964                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3965
3966         /*
3967          * if we're doing a data chunk, go ahead and make sure that
3968          * we keep a reasonable number of metadata chunks allocated in the
3969          * FS as well.
3970          */
3971         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3972                 fs_info->data_chunk_allocations++;
3973                 if (!(fs_info->data_chunk_allocations %
3974                       fs_info->metadata_ratio))
3975                         force_metadata_allocation(fs_info);
3976         }
3977
3978         /*
3979          * Check if we have enough space in SYSTEM chunk because we may need
3980          * to update devices.
3981          */
3982         check_system_chunk(trans, extent_root, flags);
3983
3984         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3985         trans->allocating_chunk = false;
3986
3987         spin_lock(&space_info->lock);
3988         if (ret < 0 && ret != -ENOSPC)
3989                 goto out;
3990         if (ret)
3991                 space_info->full = 1;
3992         else
3993                 ret = 1;
3994
3995         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3996 out:
3997         space_info->chunk_alloc = 0;
3998         spin_unlock(&space_info->lock);
3999         mutex_unlock(&fs_info->chunk_mutex);
4000         return ret;
4001 }
4002
4003 static int can_overcommit(struct btrfs_root *root,
4004                           struct btrfs_space_info *space_info, u64 bytes,
4005                           enum btrfs_reserve_flush_enum flush)
4006 {
4007         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4008         u64 profile = btrfs_get_alloc_profile(root, 0);
4009         u64 space_size;
4010         u64 avail;
4011         u64 used;
4012
4013         used = space_info->bytes_used + space_info->bytes_reserved +
4014                 space_info->bytes_pinned + space_info->bytes_readonly;
4015
4016         /*
4017          * We only want to allow over committing if we have lots of actual space
4018          * free, but if we don't have enough space to handle the global reserve
4019          * space then we could end up having a real enospc problem when trying
4020          * to allocate a chunk or some other such important allocation.
4021          */
4022         spin_lock(&global_rsv->lock);
4023         space_size = calc_global_rsv_need_space(global_rsv);
4024         spin_unlock(&global_rsv->lock);
4025         if (used + space_size >= space_info->total_bytes)
4026                 return 0;
4027
4028         used += space_info->bytes_may_use;
4029
4030         spin_lock(&root->fs_info->free_chunk_lock);
4031         avail = root->fs_info->free_chunk_space;
4032         spin_unlock(&root->fs_info->free_chunk_lock);
4033
4034         /*
4035          * If we have dup, raid1 or raid10 then only half of the free
4036          * space is actually usable.  For raid56, the space info used
4037          * doesn't include the parity drive, so we don't have to
4038          * change the math
4039          */
4040         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4041                        BTRFS_BLOCK_GROUP_RAID1 |
4042                        BTRFS_BLOCK_GROUP_RAID10))
4043                 avail >>= 1;
4044
4045         /*
4046          * If we aren't allowed to flush all things, let us overcommit up to
4047          * half of the space.  If we can flush, don't let us overcommit
4048          * too much, let it overcommit up to 1/8 of the space.
4049          */
4050         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4051                 avail >>= 3;
4052         else
4053                 avail >>= 1;
4054
4055         if (used + bytes < space_info->total_bytes + avail)
4056                 return 1;
4057         return 0;
4058 }
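/*
 * Editor's note -- worked example of the can_overcommit() math above,
 * using illustrative numbers that are not from the source:
 *
 *   total_bytes = 10GiB, used (incl. bytes_may_use) = 9GiB,
 *   free_chunk_space = 8GiB, RAID1 profile, flush == FLUSH_ALL:
 *
 *   avail = 8GiB >> 1 = 4GiB    (RAID1 halves the usable free space)
 *   avail = 4GiB >> 3 = 512MiB  (FLUSH_ALL caps overcommit at 1/8)
 *
 *   The request passes iff used + bytes < total_bytes + avail, i.e.
 *   9GiB + bytes < 10.5GiB, so up to ~1.5GiB may still be reserved.
 */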
4059
4060 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4061                                          unsigned long nr_pages, int nr_items)
4062 {
4063         struct super_block *sb = root->fs_info->sb;
4064
4065         if (down_read_trylock(&sb->s_umount)) {
4066                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4067                 up_read(&sb->s_umount);
4068         } else {
4069                 /*
4070                  * We needn't worry about the filesystem going from r/w to r/o
4071                  * even though we don't acquire the ->s_umount mutex, because
4072                  * the filesystem should guarantee that the delalloc inode list
4073                  * is empty once the filesystem is read-only (all dirty pages
4074                  * have been written to disk).
4075                  */
4076                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4077                 if (!current->journal_info)
4078                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4079         }
4080 }
4081
4082 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4083 {
4084         u64 bytes;
4085         int nr;
4086
4087         bytes = btrfs_calc_trans_metadata_size(root, 1);
4088         nr = (int)div64_u64(to_reclaim, bytes);
4089         if (!nr)
4090                 nr = 1;
4091         return nr;
4092 }
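/*
 * Editor's worked example (assuming 16KiB nodes and BTRFS_MAX_LEVEL == 8,
 * so btrfs_calc_trans_metadata_size(root, 1) == 16KiB * 8 * 2 = 256KiB):
 * asking calc_reclaim_items_nr() to reclaim 1MiB yields nr = 4 items,
 * and the result is clamped to at least 1 so we always flush something.
 */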
4093
4094 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4095
4096 /*
4097  * shrink metadata reservation for delalloc
4098  */
4099 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4100                             bool wait_ordered)
4101 {
4102         struct btrfs_block_rsv *block_rsv;
4103         struct btrfs_space_info *space_info;
4104         struct btrfs_trans_handle *trans;
4105         u64 delalloc_bytes;
4106         u64 max_reclaim;
4107         long time_left;
4108         unsigned long nr_pages;
4109         int loops;
4110         int items;
4111         enum btrfs_reserve_flush_enum flush;
4112
4113         /* Calculate the number of items we need to flush for this reservation */
4114         items = calc_reclaim_items_nr(root, to_reclaim);
4115         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4116
4117         trans = (struct btrfs_trans_handle *)current->journal_info;
4118         block_rsv = &root->fs_info->delalloc_block_rsv;
4119         space_info = block_rsv->space_info;
4120
4121         delalloc_bytes = percpu_counter_sum_positive(
4122                                                 &root->fs_info->delalloc_bytes);
4123         if (delalloc_bytes == 0) {
4124                 if (trans)
4125                         return;
4126                 if (wait_ordered)
4127                         btrfs_wait_ordered_roots(root->fs_info, items);
4128                 return;
4129         }
4130
4131         loops = 0;
4132         while (delalloc_bytes && loops < 3) {
4133                 max_reclaim = min(delalloc_bytes, to_reclaim);
4134                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4135                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4136                 /*
4137                  * We need to wait for the async pages to actually start before
4138                  * we do anything.
4139                  */
4140                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4141                 if (!max_reclaim)
4142                         goto skip_async;
4143
4144                 if (max_reclaim <= nr_pages)
4145                         max_reclaim = 0;
4146                 else
4147                         max_reclaim -= nr_pages;
4148
4149                 wait_event(root->fs_info->async_submit_wait,
4150                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4151                            (int)max_reclaim);
4152 skip_async:
4153                 if (!trans)
4154                         flush = BTRFS_RESERVE_FLUSH_ALL;
4155                 else
4156                         flush = BTRFS_RESERVE_NO_FLUSH;
4157                 spin_lock(&space_info->lock);
4158                 if (can_overcommit(root, space_info, orig, flush)) {
4159                         spin_unlock(&space_info->lock);
4160                         break;
4161                 }
4162                 spin_unlock(&space_info->lock);
4163
4164                 loops++;
4165                 if (wait_ordered && !trans) {
4166                         btrfs_wait_ordered_roots(root->fs_info, items);
4167                 } else {
4168                         time_left = schedule_timeout_killable(1);
4169                         if (time_left)
4170                                 break;
4171                 }
4172                 delalloc_bytes = percpu_counter_sum_positive(
4173                                                 &root->fs_info->delalloc_bytes);
4174         }
4175 }
4176
4177 /**
4178  * may_commit_transaction - possibly commit the transaction if it's ok to
4179  * @root - the root we're allocating for
4180  * @bytes - the number of bytes we want to reserve
4181  * @force - force the commit
4182  *
4183  * This will check to make sure that committing the transaction will actually
4184  * get us somewhere and then commit the transaction if it does.  Otherwise it
4185  * will return -ENOSPC.
4186  */
4187 static int may_commit_transaction(struct btrfs_root *root,
4188                                   struct btrfs_space_info *space_info,
4189                                   u64 bytes, int force)
4190 {
4191         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4192         struct btrfs_trans_handle *trans;
4193
4194         trans = (struct btrfs_trans_handle *)current->journal_info;
4195         if (trans)
4196                 return -EAGAIN;
4197
4198         if (force)
4199                 goto commit;
4200
4201         /* See if there is enough pinned space to make this reservation */
4202         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4203                                    bytes) >= 0)
4204                 goto commit;
4205
4206         /*
4207          * See if there is some space in the delayed insertion reservation for
4208          * this reservation.
4209          */
4210         if (space_info != delayed_rsv->space_info)
4211                 return -ENOSPC;
4212
4213         spin_lock(&delayed_rsv->lock);
4214         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4215                                    bytes - delayed_rsv->size) < 0) {
4216                 spin_unlock(&delayed_rsv->lock);
4217                 return -ENOSPC;
4218         }
4219         spin_unlock(&delayed_rsv->lock);
4220
4221 commit:
4222         trans = btrfs_join_transaction(root);
4223         if (IS_ERR(trans))
4224                 return -ENOSPC;
4225
4226         return btrfs_commit_transaction(trans, root);
4227 }
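/*
 * Editor's note -- illustrative numbers for the delayed_rsv check above:
 * wanting bytes = 1MiB with total_bytes_pinned = 512KiB and
 * delayed_rsv->size = 768KiB, pinned alone is not enough but pinned plus
 * the delayed reservation (1.25MiB) covers the request, so the commit is
 * attempted; with delayed_rsv->size = 256KiB we return -ENOSPC instead
 * of forcing a useless commit.
 */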
4228
4229 enum flush_state {
4230         FLUSH_DELAYED_ITEMS_NR  =       1,
4231         FLUSH_DELAYED_ITEMS     =       2,
4232         FLUSH_DELALLOC          =       3,
4233         FLUSH_DELALLOC_WAIT     =       4,
4234         ALLOC_CHUNK             =       5,
4235         COMMIT_TRANS            =       6,
4236 };
4237
4238 static int flush_space(struct btrfs_root *root,
4239                        struct btrfs_space_info *space_info, u64 num_bytes,
4240                        u64 orig_bytes, int state)
4241 {
4242         struct btrfs_trans_handle *trans;
4243         int nr;
4244         int ret = 0;
4245
4246         switch (state) {
4247         case FLUSH_DELAYED_ITEMS_NR:
4248         case FLUSH_DELAYED_ITEMS:
4249                 if (state == FLUSH_DELAYED_ITEMS_NR)
4250                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4251                 else
4252                         nr = -1;
4253
4254                 trans = btrfs_join_transaction(root);
4255                 if (IS_ERR(trans)) {
4256                         ret = PTR_ERR(trans);
4257                         break;
4258                 }
4259                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4260                 btrfs_end_transaction(trans, root);
4261                 break;
4262         case FLUSH_DELALLOC:
4263         case FLUSH_DELALLOC_WAIT:
4264                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4265                                 state == FLUSH_DELALLOC_WAIT);
4266                 break;
4267         case ALLOC_CHUNK:
4268                 trans = btrfs_join_transaction(root);
4269                 if (IS_ERR(trans)) {
4270                         ret = PTR_ERR(trans);
4271                         break;
4272                 }
4273                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4274                                      btrfs_get_alloc_profile(root, 0),
4275                                      CHUNK_ALLOC_NO_FORCE);
4276                 btrfs_end_transaction(trans, root);
4277                 if (ret == -ENOSPC)
4278                         ret = 0;
4279                 break;
4280         case COMMIT_TRANS:
4281                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4282                 break;
4283         default:
4284                 ret = -ENOSPC;
4285                 break;
4286         }
4287
4288         return ret;
4289 }
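/*
 * Editor's sketch (illustration only, not part of the kernel source) of
 * how callers walk the flush_space() ladder from the cheapest form of
 * reclaim to the most expensive; reserve_metadata_bytes() below is the
 * real caller and re-attempts the reservation between steps.
 */
#if 0	/* editor-added illustration */
static void example_escalate_flush(struct btrfs_root *root,
				   struct btrfs_space_info *space_info,
				   u64 bytes)
{
	int state;

	for (state = FLUSH_DELAYED_ITEMS_NR; state <= COMMIT_TRANS; state++)
		if (!flush_space(root, space_info, bytes, bytes, state))
			break;	/* this step succeeded; retry the reservation */
}
#endif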
4290
4291 static inline u64
4292 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4293                                  struct btrfs_space_info *space_info)
4294 {
4295         u64 used;
4296         u64 expected;
4297         u64 to_reclaim;
4298
4299         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4300                                 16 * 1024 * 1024);
4301         spin_lock(&space_info->lock);
4302         if (can_overcommit(root, space_info, to_reclaim,
4303                            BTRFS_RESERVE_FLUSH_ALL)) {
4304                 to_reclaim = 0;
4305                 goto out;
4306         }
4307
4308         used = space_info->bytes_used + space_info->bytes_reserved +
4309                space_info->bytes_pinned + space_info->bytes_readonly +
4310                space_info->bytes_may_use;
4311         if (can_overcommit(root, space_info, 1024 * 1024,
4312                            BTRFS_RESERVE_FLUSH_ALL))
4313                 expected = div_factor_fine(space_info->total_bytes, 95);
4314         else
4315                 expected = div_factor_fine(space_info->total_bytes, 90);
4316
4317         if (used > expected)
4318                 to_reclaim = used - expected;
4319         else
4320                 to_reclaim = 0;
4321         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4322                                      space_info->bytes_reserved);
4323 out:
4324         spin_unlock(&space_info->lock);
4325
4326         return to_reclaim;
4327 }
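/*
 * Editor's worked example for the sizing above (illustrative numbers):
 * on a 4-CPU machine the initial target is min(4 * 1MiB, 16MiB) = 4MiB;
 * if that can still be overcommitted we reclaim nothing.  Otherwise,
 * with total_bytes = 10GiB and used = 9.8GiB, the target fill level is
 * 95% (9.5GiB) if a token 1MiB can still be overcommitted, else 90%
 * (9GiB), giving to_reclaim of ~0.3GiB or ~0.8GiB respectively, clamped
 * to bytes_may_use + bytes_reserved.
 */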
4328
4329 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4330                                         struct btrfs_fs_info *fs_info, u64 used)
4331 {
4332         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4333
4334         /* If we're just plain full then async reclaim just slows us down. */
4335         if (space_info->bytes_used >= thresh)
4336                 return 0;
4337
4338         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4339                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4340 }
4341
4342 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4343                                        struct btrfs_fs_info *fs_info,
4344                                        int flush_state)
4345 {
4346         u64 used;
4347
4348         spin_lock(&space_info->lock);
4349         /*
4350          * We've run out of space and haven't gotten any free space via
4351          * flush_space, so don't bother doing async reclaim.
4352          */
4353         if (flush_state > COMMIT_TRANS && space_info->full) {
4354                 spin_unlock(&space_info->lock);
4355                 return 0;
4356         }
4357
4358         used = space_info->bytes_used + space_info->bytes_reserved +
4359                space_info->bytes_pinned + space_info->bytes_readonly +
4360                space_info->bytes_may_use;
4361         if (need_do_async_reclaim(space_info, fs_info, used)) {
4362                 spin_unlock(&space_info->lock);
4363                 return 1;
4364         }
4365         spin_unlock(&space_info->lock);
4366
4367         return 0;
4368 }
4369
4370 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4371 {
4372         struct btrfs_fs_info *fs_info;
4373         struct btrfs_space_info *space_info;
4374         u64 to_reclaim;
4375         int flush_state;
4376
4377         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4378         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4379
4380         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4381                                                       space_info);
4382         if (!to_reclaim)
4383                 return;
4384
4385         flush_state = FLUSH_DELAYED_ITEMS_NR;
4386         do {
4387                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4388                             to_reclaim, flush_state);
4389                 flush_state++;
4390                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4391                                                  flush_state))
4392                         return;
4393         } while (flush_state < COMMIT_TRANS);
4394 }
4395
4396 void btrfs_init_async_reclaim_work(struct work_struct *work)
4397 {
4398         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4399 }
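/*
 * Editor's sketch (illustration only) of how the async reclaim work is
 * wired up; the real init call lives in open_ctree() and the real
 * queueing in reserve_metadata_bytes() below.
 */
#if 0	/* editor-added illustration */
static void example_kick_async_reclaim(struct btrfs_fs_info *fs_info)
{
	/* Done once at mount time. */
	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);

	/* Later, under reservation pressure, queued at most once at a time. */
	if (!work_busy(&fs_info->async_reclaim_work))
		queue_work(system_unbound_wq, &fs_info->async_reclaim_work);
}
#endif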
4400
4401 /**
4402  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4403  * @root - the root we're allocating for
4404  * @block_rsv - the block_rsv we're allocating for
4405  * @orig_bytes - the number of bytes we want
4406  * @flush - whether or not we can flush to make our reservation
4407  *
4408  * This will reserve orig_bytes number of bytes from the space info associated
4409  * with the block_rsv.  If there is not enough space it will make an attempt to
4410  * flush out space to make room.  It will do this by flushing delalloc if
4411  * possible or committing the transaction.  If flush is 0 then no attempts to
4412  * regain reservations will be made and this will fail if there is not enough
4413  * space already.
4414  */
4415 static int reserve_metadata_bytes(struct btrfs_root *root,
4416                                   struct btrfs_block_rsv *block_rsv,
4417                                   u64 orig_bytes,
4418                                   enum btrfs_reserve_flush_enum flush)
4419 {
4420         struct btrfs_space_info *space_info = block_rsv->space_info;
4421         u64 used;
4422         u64 num_bytes = orig_bytes;
4423         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4424         int ret = 0;
4425         bool flushing = false;
4426
4427 again:
4428         ret = 0;
4429         spin_lock(&space_info->lock);
4430         /*
4431          * We only want to wait if somebody other than us is flushing and we
4432          * are actually allowed to flush all things.
4433          */
4434         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4435                space_info->flush) {
4436                 spin_unlock(&space_info->lock);
4437                 /*
4438                  * If we have a trans handle we can't wait because the flusher
4439                  * may have to commit the transaction, which would mean we would
4440                  * deadlock since we are waiting for the flusher to finish, but
4441                  * hold the current transaction open.
4442                  */
4443                 if (current->journal_info)
4444                         return -EAGAIN;
4445                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4446                 /* Must have been killed, return */
4447                 if (ret)
4448                         return -EINTR;
4449
4450                 spin_lock(&space_info->lock);
4451         }
4452
4453         ret = -ENOSPC;
4454         used = space_info->bytes_used + space_info->bytes_reserved +
4455                 space_info->bytes_pinned + space_info->bytes_readonly +
4456                 space_info->bytes_may_use;
4457
4458         /*
4459          * The idea here is that if we've not already over-reserved the space
4460          * then we can go ahead and save our reservation first and then start
4461          * flushing if we need to.  Otherwise, if we've already overcommitted,
4462          * let's start flushing stuff first and then come back and try to make
4463          * our reservation.
4464          */
4465         if (used <= space_info->total_bytes) {
4466                 if (used + orig_bytes <= space_info->total_bytes) {
4467                         space_info->bytes_may_use += orig_bytes;
4468                         trace_btrfs_space_reservation(root->fs_info,
4469                                 "space_info", space_info->flags, orig_bytes, 1);
4470                         ret = 0;
4471                 } else {
4472                         /*
4473                          * Ok set num_bytes to orig_bytes since we aren't
4474                          * overcommitted, this way we only try and reclaim what
4475                          * we need.
4476                          */
4477                         num_bytes = orig_bytes;
4478                 }
4479         } else {
4480                 /*
4481                  * Ok we're over committed, set num_bytes to the overcommitted
4482                  * amount plus the amount of bytes that we need for this
4483                  * reservation.
4484                  */
4485                 num_bytes = used - space_info->total_bytes +
4486                         (orig_bytes * 2);
4487         }
4488
4489         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4490                 space_info->bytes_may_use += orig_bytes;
4491                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4492                                               space_info->flags, orig_bytes,
4493                                               1);
4494                 ret = 0;
4495         }
4496
4497         /*
4498          * Couldn't make our reservation; save our place so that while we're
4499          * trying to reclaim space we can actually use it instead of somebody
4500          * else stealing it from us.
4501          *
4502          * We make the other tasks wait for the flush only when we can flush
4503          * all things.
4504          */
4505         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4506                 flushing = true;
4507                 space_info->flush = 1;
4508         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4509                 used += orig_bytes;
4510                 /*
4511                  * We will do the space reservation dance during log replay,
4512                  * which means we won't have fs_info->fs_root set, so don't do
4513                  * the async reclaim as we will panic.
4514                  */
4515                 if (!root->fs_info->log_root_recovering &&
4516                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4517                     !work_busy(&root->fs_info->async_reclaim_work))
4518                         queue_work(system_unbound_wq,
4519                                    &root->fs_info->async_reclaim_work);
4520         }
4521         spin_unlock(&space_info->lock);
4522
4523         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4524                 goto out;
4525
4526         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4527                           flush_state);
4528         flush_state++;
4529
4530         /*
4531          * If we are FLUSH_LIMIT, we can't flush delalloc, or a deadlock
4532          * would happen.  So skip the delalloc flush.
4533          */
4534         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4535             (flush_state == FLUSH_DELALLOC ||
4536              flush_state == FLUSH_DELALLOC_WAIT))
4537                 flush_state = ALLOC_CHUNK;
4538
4539         if (!ret)
4540                 goto again;
4541         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4542                  flush_state < COMMIT_TRANS)
4543                 goto again;
4544         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4545                  flush_state <= COMMIT_TRANS)
4546                 goto again;
4547
4548 out:
4549         if (ret == -ENOSPC &&
4550             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4551                 struct btrfs_block_rsv *global_rsv =
4552                         &root->fs_info->global_block_rsv;
4553
4554                 if (block_rsv != global_rsv &&
4555                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4556                         ret = 0;
4557         }
4558         if (ret == -ENOSPC)
4559                 trace_btrfs_space_reservation(root->fs_info,
4560                                               "space_info:enospc",
4561                                               space_info->flags, orig_bytes, 1);
4562         if (flushing) {
4563                 spin_lock(&space_info->lock);
4564                 space_info->flush = 0;
4565                 wake_up_all(&space_info->wait);
4566                 spin_unlock(&space_info->lock);
4567         }
4568         return ret;
4569 }
4570
4571 static struct btrfs_block_rsv *get_block_rsv(
4572                                         const struct btrfs_trans_handle *trans,
4573                                         const struct btrfs_root *root)
4574 {
4575         struct btrfs_block_rsv *block_rsv = NULL;
4576
4577         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4578                 block_rsv = trans->block_rsv;
4579
4580         if (root == root->fs_info->csum_root && trans->adding_csums)
4581                 block_rsv = trans->block_rsv;
4582
4583         if (root == root->fs_info->uuid_root)
4584                 block_rsv = trans->block_rsv;
4585
4586         if (!block_rsv)
4587                 block_rsv = root->block_rsv;
4588
4589         if (!block_rsv)
4590                 block_rsv = &root->fs_info->empty_block_rsv;
4591
4592         return block_rsv;
4593 }
4594
4595 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4596                                u64 num_bytes)
4597 {
4598         int ret = -ENOSPC;
4599         spin_lock(&block_rsv->lock);
4600         if (block_rsv->reserved >= num_bytes) {
4601                 block_rsv->reserved -= num_bytes;
4602                 if (block_rsv->reserved < block_rsv->size)
4603                         block_rsv->full = 0;
4604                 ret = 0;
4605         }
4606         spin_unlock(&block_rsv->lock);
4607         return ret;
4608 }
4609
4610 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4611                                 u64 num_bytes, int update_size)
4612 {
4613         spin_lock(&block_rsv->lock);
4614         block_rsv->reserved += num_bytes;
4615         if (update_size)
4616                 block_rsv->size += num_bytes;
4617         else if (block_rsv->reserved >= block_rsv->size)
4618                 block_rsv->full = 1;
4619         spin_unlock(&block_rsv->lock);
4620 }
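/*
 * Editor's note -- the two helpers above maintain the block_rsv
 * invariant: "size" is what we want on hand, "reserved" is what we
 * actually hold, and "full" is set only while reserved >= size.  An
 * illustrative sequence: with size = reserved = 4MiB (full == 1),
 * block_rsv_use_bytes(rsv, 1MiB) leaves reserved = 3MiB and clears
 * "full"; block_rsv_add_bytes(rsv, 1MiB, 0) restores it.
 */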
4621
4622 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4623                              struct btrfs_block_rsv *dest, u64 num_bytes,
4624                              int min_factor)
4625 {
4626         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4627         u64 min_bytes;
4628
4629         if (global_rsv->space_info != dest->space_info)
4630                 return -ENOSPC;
4631
4632         spin_lock(&global_rsv->lock);
4633         min_bytes = div_factor(global_rsv->size, min_factor);
4634         if (global_rsv->reserved < min_bytes + num_bytes) {
4635                 spin_unlock(&global_rsv->lock);
4636                 return -ENOSPC;
4637         }
4638         global_rsv->reserved -= num_bytes;
4639         if (global_rsv->reserved < global_rsv->size)
4640                 global_rsv->full = 0;
4641         spin_unlock(&global_rsv->lock);
4642
4643         block_rsv_add_bytes(dest, num_bytes, 1);
4644         return 0;
4645 }
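/*
 * Editor's worked example for btrfs_cond_migrate_bytes() (illustrative
 * numbers): with global_rsv->size = 512MiB and min_factor = 5,
 * min_bytes = div_factor(512MiB, 5) = 256MiB, so migrating 10MiB
 * succeeds only while global_rsv->reserved >= 266MiB; otherwise the
 * global reserve is left untouched and -ENOSPC is returned.
 */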
4646
4647 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4648                                     struct btrfs_block_rsv *block_rsv,
4649                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4650 {
4651         struct btrfs_space_info *space_info = block_rsv->space_info;
4652
4653         spin_lock(&block_rsv->lock);
4654         if (num_bytes == (u64)-1)
4655                 num_bytes = block_rsv->size;
4656         block_rsv->size -= num_bytes;
4657         if (block_rsv->reserved >= block_rsv->size) {
4658                 num_bytes = block_rsv->reserved - block_rsv->size;
4659                 block_rsv->reserved = block_rsv->size;
4660                 block_rsv->full = 1;
4661         } else {
4662                 num_bytes = 0;
4663         }
4664         spin_unlock(&block_rsv->lock);
4665
4666         if (num_bytes > 0) {
4667                 if (dest) {
4668                         spin_lock(&dest->lock);
4669                         if (!dest->full) {
4670                                 u64 bytes_to_add;
4671
4672                                 bytes_to_add = dest->size - dest->reserved;
4673                                 bytes_to_add = min(num_bytes, bytes_to_add);
4674                                 dest->reserved += bytes_to_add;
4675                                 if (dest->reserved >= dest->size)
4676                                         dest->full = 1;
4677                                 num_bytes -= bytes_to_add;
4678                         }
4679                         spin_unlock(&dest->lock);
4680                 }
4681                 if (num_bytes) {
4682                         spin_lock(&space_info->lock);
4683                         space_info->bytes_may_use -= num_bytes;
4684                         trace_btrfs_space_reservation(fs_info, "space_info",
4685                                         space_info->flags, num_bytes, 0);
4686                         spin_unlock(&space_info->lock);
4687                 }
4688         }
4689 }
4690
4691 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4692                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4693 {
4694         int ret;
4695
4696         ret = block_rsv_use_bytes(src, num_bytes);
4697         if (ret)
4698                 return ret;
4699
4700         block_rsv_add_bytes(dst, num_bytes, 1);
4701         return 0;
4702 }
4703
4704 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4705 {
4706         memset(rsv, 0, sizeof(*rsv));
4707         spin_lock_init(&rsv->lock);
4708         rsv->type = type;
4709 }
4710
4711 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4712                                               unsigned short type)
4713 {
4714         struct btrfs_block_rsv *block_rsv;
4715         struct btrfs_fs_info *fs_info = root->fs_info;
4716
4717         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4718         if (!block_rsv)
4719                 return NULL;
4720
4721         btrfs_init_block_rsv(block_rsv, type);
4722         block_rsv->space_info = __find_space_info(fs_info,
4723                                                   BTRFS_BLOCK_GROUP_METADATA);
4724         return block_rsv;
4725 }
4726
4727 void btrfs_free_block_rsv(struct btrfs_root *root,
4728                           struct btrfs_block_rsv *rsv)
4729 {
4730         if (!rsv)
4731                 return;
4732         btrfs_block_rsv_release(root, rsv, (u64)-1);
4733         kfree(rsv);
4734 }
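/*
 * Editor's sketch (illustration only) of the typical lifecycle of a
 * temporary reservation; BTRFS_BLOCK_RSV_TEMP is assumed to be the
 * usual type for such short-lived callers.
 */
#if 0	/* editor-added illustration */
static int example_rsv_lifecycle(struct btrfs_root *root, u64 bytes)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	/* Reserve, do the work, then hand anything left back. */
	ret = btrfs_block_rsv_add(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
	if (!ret)
		btrfs_block_rsv_release(root, rsv, bytes);
	btrfs_free_block_rsv(root, rsv);
	return ret;
}
#endif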
4735
4736 int btrfs_block_rsv_add(struct btrfs_root *root,
4737                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4738                         enum btrfs_reserve_flush_enum flush)
4739 {
4740         int ret;
4741
4742         if (num_bytes == 0)
4743                 return 0;
4744
4745         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4746         if (!ret) {
4747                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4748                 return 0;
4749         }
4750
4751         return ret;
4752 }
4753
4754 int btrfs_block_rsv_check(struct btrfs_root *root,
4755                           struct btrfs_block_rsv *block_rsv, int min_factor)
4756 {
4757         u64 num_bytes = 0;
4758         int ret = -ENOSPC;
4759
4760         if (!block_rsv)
4761                 return 0;
4762
4763         spin_lock(&block_rsv->lock);
4764         num_bytes = div_factor(block_rsv->size, min_factor);
4765         if (block_rsv->reserved >= num_bytes)
4766                 ret = 0;
4767         spin_unlock(&block_rsv->lock);
4768
4769         return ret;
4770 }
4771
4772 int btrfs_block_rsv_refill(struct btrfs_root *root,
4773                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4774                            enum btrfs_reserve_flush_enum flush)
4775 {
4776         u64 num_bytes = 0;
4777         int ret = -ENOSPC;
4778
4779         if (!block_rsv)
4780                 return 0;
4781
4782         spin_lock(&block_rsv->lock);
4783         num_bytes = min_reserved;
4784         if (block_rsv->reserved >= num_bytes)
4785                 ret = 0;
4786         else
4787                 num_bytes -= block_rsv->reserved;
4788         spin_unlock(&block_rsv->lock);
4789
4790         if (!ret)
4791                 return 0;
4792
4793         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4794         if (!ret) {
4795                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4796                 return 0;
4797         }
4798
4799         return ret;
4800 }
4801
4802 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4803                             struct btrfs_block_rsv *dst_rsv,
4804                             u64 num_bytes)
4805 {
4806         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4807 }
4808
4809 void btrfs_block_rsv_release(struct btrfs_root *root,
4810                              struct btrfs_block_rsv *block_rsv,
4811                              u64 num_bytes)
4812 {
4813         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4814         if (global_rsv == block_rsv ||
4815             block_rsv->space_info != global_rsv->space_info)
4816                 global_rsv = NULL;
4817         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4818                                 num_bytes);
4819 }
4820
4821 /*
4822  * Helper to calculate the size of the global block reservation.
4823  * The desired value is the sum of the space used by the extent tree,
4824  * checksum tree and root tree.
4825  */
4826 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4827 {
4828         struct btrfs_space_info *sinfo;
4829         u64 num_bytes;
4830         u64 meta_used;
4831         u64 data_used;
4832         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4833
4834         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4835         spin_lock(&sinfo->lock);
4836         data_used = sinfo->bytes_used;
4837         spin_unlock(&sinfo->lock);
4838
4839         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4840         spin_lock(&sinfo->lock);
4841         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4842                 data_used = 0;
4843         meta_used = sinfo->bytes_used;
4844         spin_unlock(&sinfo->lock);
4845
4846         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4847                     csum_size * 2;
4848         num_bytes += div_u64(data_used + meta_used, 50);
4849
4850         if (num_bytes * 3 > meta_used)
4851                 num_bytes = div_u64(meta_used, 3);
4852
4853         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4854 }
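/*
 * Editor's worked example (illustrative, assuming a 4KiB block size,
 * crc32c csum_size == 4 and 16KiB nodes): with data_used = 100GiB and
 * meta_used = 2GiB,
 *
 *   num_bytes  = (100GiB >> 12) * 4 * 2        ~= 200MiB of csum items
 *   num_bytes += (100GiB + 2GiB) / 50          ~= 2.04GiB -> ~2.24GiB
 *
 * Since 2.24GiB * 3 > meta_used, the result is capped at meta_used / 3
 * ~= 682MiB, then aligned up to nodesize << 10 (16MiB here).
 */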
4855
4856 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4857 {
4858         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4859         struct btrfs_space_info *sinfo = block_rsv->space_info;
4860         u64 num_bytes;
4861
4862         num_bytes = calc_global_metadata_size(fs_info);
4863
4864         spin_lock(&sinfo->lock);
4865         spin_lock(&block_rsv->lock);
4866
4867         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4868
4869         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4870                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4871                     sinfo->bytes_may_use;
4872
4873         if (sinfo->total_bytes > num_bytes) {
4874                 num_bytes = sinfo->total_bytes - num_bytes;
4875                 block_rsv->reserved += num_bytes;
4876                 sinfo->bytes_may_use += num_bytes;
4877                 trace_btrfs_space_reservation(fs_info, "space_info",
4878                                       sinfo->flags, num_bytes, 1);
4879         }
4880
4881         if (block_rsv->reserved >= block_rsv->size) {
4882                 num_bytes = block_rsv->reserved - block_rsv->size;
4883                 sinfo->bytes_may_use -= num_bytes;
4884                 trace_btrfs_space_reservation(fs_info, "space_info",
4885                                       sinfo->flags, num_bytes, 0);
4886                 block_rsv->reserved = block_rsv->size;
4887                 block_rsv->full = 1;
4888         }
4889
4890         spin_unlock(&block_rsv->lock);
4891         spin_unlock(&sinfo->lock);
4892 }
4893
4894 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4895 {
4896         struct btrfs_space_info *space_info;
4897
4898         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4899         fs_info->chunk_block_rsv.space_info = space_info;
4900
4901         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4902         fs_info->global_block_rsv.space_info = space_info;
4903         fs_info->delalloc_block_rsv.space_info = space_info;
4904         fs_info->trans_block_rsv.space_info = space_info;
4905         fs_info->empty_block_rsv.space_info = space_info;
4906         fs_info->delayed_block_rsv.space_info = space_info;
4907
4908         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4909         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4910         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4911         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4912         if (fs_info->quota_root)
4913                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4914         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4915
4916         update_global_block_rsv(fs_info);
4917 }
4918
4919 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4920 {
4921         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4922                                 (u64)-1);
4923         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4924         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4925         WARN_ON(fs_info->trans_block_rsv.size > 0);
4926         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4927         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4928         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4929         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4930         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4931 }
4932
4933 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4934                                   struct btrfs_root *root)
4935 {
4936         if (!trans->block_rsv)
4937                 return;
4938
4939         if (!trans->bytes_reserved)
4940                 return;
4941
4942         trace_btrfs_space_reservation(root->fs_info, "transaction",
4943                                       trans->transid, trans->bytes_reserved, 0);
4944         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4945         trans->bytes_reserved = 0;
4946 }
4947
4948 /* Can only return 0 or -ENOSPC */
4949 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4950                                   struct inode *inode)
4951 {
4952         struct btrfs_root *root = BTRFS_I(inode)->root;
4953         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4954         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4955
4956         /*
4957          * We need to hold space in order to delete our orphan item once we've
4958          * added it, so this takes the reservation so we can release it later
4959          * when we are truly done with the orphan item.
4960          */
4961         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4962         trace_btrfs_space_reservation(root->fs_info, "orphan",
4963                                       btrfs_ino(inode), num_bytes, 1);
4964         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4965 }
4966
4967 void btrfs_orphan_release_metadata(struct inode *inode)
4968 {
4969         struct btrfs_root *root = BTRFS_I(inode)->root;
4970         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4971         trace_btrfs_space_reservation(root->fs_info, "orphan",
4972                                       btrfs_ino(inode), num_bytes, 0);
4973         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4974 }
4975
4976 /*
4977  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4978  * root: the root of the parent directory
4979  * rsv: block reservation
4980  * items: the number of items that we need to reserve space for
4981  * qgroup_reserved: used to return the reserved size in qgroup
4982  *
4983  * This function is used to reserve the space for snapshot/subvolume
4984  * creation and deletion.  Those operations differ from the common
4985  * file/directory operations in that they modify two fs/file trees
4986  * and the root tree; the number of items that the qgroup reserves
4987  * also differs from the free space reservation.  So we can not use
4988  * the space reservation mechanism in start_transaction().
4989  */
4990 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4991                                      struct btrfs_block_rsv *rsv,
4992                                      int items,
4993                                      u64 *qgroup_reserved,
4994                                      bool use_global_rsv)
4995 {
4996         u64 num_bytes;
4997         int ret;
4998         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4999
5000         if (root->fs_info->quota_enabled) {
5001                 /* One for parent inode, two for dir entries */
5002                 num_bytes = 3 * root->nodesize;
5003                 ret = btrfs_qgroup_reserve(root, num_bytes);
5004                 if (ret)
5005                         return ret;
5006         } else {
5007                 num_bytes = 0;
5008         }
5009
5010         *qgroup_reserved = num_bytes;
5011
5012         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5013         rsv->space_info = __find_space_info(root->fs_info,
5014                                             BTRFS_BLOCK_GROUP_METADATA);
5015         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5016                                   BTRFS_RESERVE_FLUSH_ALL);
5017
5018         if (ret == -ENOSPC && use_global_rsv)
5019                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5020
5021         if (ret) {
5022                 if (*qgroup_reserved)
5023                         btrfs_qgroup_free(root, *qgroup_reserved);
5024         }
5025
5026         return ret;
5027 }
5028
5029 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5030                                       struct btrfs_block_rsv *rsv,
5031                                       u64 qgroup_reserved)
5032 {
5033         btrfs_block_rsv_release(root, rsv, (u64)-1);
5034         if (qgroup_reserved)
5035                 btrfs_qgroup_free(root, qgroup_reserved);
5036 }
5037
5038 /**
5039  * drop_outstanding_extent - drop an outstanding extent
5040  * @inode: the inode we're dropping the extent for
5041  * @num_bytes: the number of bytes we're releasing.
5042  *
5043  * This is called when we are freeing up an outstanding extent, either called
5044  * after an error or after an extent is written.  This will return the number of
5045  * reserved extents that need to be freed.  This must be called with
5046  * BTRFS_I(inode)->lock held.
5047  */
5048 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5049 {
5050         unsigned drop_inode_space = 0;
5051         unsigned dropped_extents = 0;
5052         unsigned num_extents = 0;
5053
5054         num_extents = (unsigned)div64_u64(num_bytes +
5055                                           BTRFS_MAX_EXTENT_SIZE - 1,
5056                                           BTRFS_MAX_EXTENT_SIZE);
5057         ASSERT(num_extents);
5058         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5059         BTRFS_I(inode)->outstanding_extents -= num_extents;
5060
5061         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5062             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5063                                &BTRFS_I(inode)->runtime_flags))
5064                 drop_inode_space = 1;
5065
5066         /*
5067          * If we have at least as many outstanding extents as we have
5068          * reserved, then we need to leave the reserved extents count alone.
5069          */
5070         if (BTRFS_I(inode)->outstanding_extents >=
5071             BTRFS_I(inode)->reserved_extents)
5072                 return drop_inode_space;
5073
5074         dropped_extents = BTRFS_I(inode)->reserved_extents -
5075                 BTRFS_I(inode)->outstanding_extents;
5076         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5077         return dropped_extents + drop_inode_space;
5078 }
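/*
 * Editor's worked example (assuming BTRFS_MAX_EXTENT_SIZE == 128MiB):
 * freeing num_bytes = 300MiB accounts for DIV_ROUND_UP(300MiB, 128MiB)
 * = 3 outstanding extents.  If outstanding_extents then drops to 6
 * while 8 are still reserved, dropped_extents = 2 are returned for the
 * caller to free; the inode-update reservation is only released once
 * outstanding_extents hits zero.
 */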
5079
5080 /**
5081  * calc_csum_metadata_size - return the amount of metadata space that must be
5082  *      reserved/freed for the given bytes.
5083  * @inode: the inode we're manipulating
5084  * @num_bytes: the number of bytes in question
5085  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5086  *
5087  * This adjusts the number of csum_bytes in the inode and then returns the
5088  * correct amount of metadata that must either be reserved or freed.  We
5089  * calculate how many checksums we can fit into one leaf and then divide the
5090  * number of bytes that will need to be checksummed by this value to figure out
5091  * how many checksums will be required.  If we are adding bytes then the number
5092  * may go up and we will return the number of additional bytes that must be
5093  * reserved.  If it is going down we will return the number of bytes that must
5094  * be freed.
5095  *
5096  * This must be called with BTRFS_I(inode)->lock held.
5097  */
5098 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5099                                    int reserve)
5100 {
5101         struct btrfs_root *root = BTRFS_I(inode)->root;
5102         u64 old_csums, num_csums;
5103
5104         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5105             BTRFS_I(inode)->csum_bytes == 0)
5106                 return 0;
5107
5108         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5109         if (reserve)
5110                 BTRFS_I(inode)->csum_bytes += num_bytes;
5111         else
5112                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5113         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5114
5115         /* No change, no need to reserve more */
5116         if (old_csums == num_csums)
5117                 return 0;
5118
5119         if (reserve)
5120                 return btrfs_calc_trans_metadata_size(root,
5121                                                       num_csums - old_csums);
5122
5123         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5124 }
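/*
 * Editor's note -- hypothetical example of the leaf-count delta above:
 * if btrfs_csum_bytes_to_leaves() maps the current csum_bytes to 4
 * leaves and, after adding num_bytes, to 6 leaves, the caller must
 * reserve btrfs_calc_trans_metadata_size(root, 2); shrinking csum_bytes
 * by the same amount frees the symmetric quantity, and no change in
 * leaf count costs nothing.
 */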
5125
5126 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5127 {
5128         struct btrfs_root *root = BTRFS_I(inode)->root;
5129         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5130         u64 to_reserve = 0;
5131         u64 csum_bytes;
5132         unsigned nr_extents = 0;
5133         int extra_reserve = 0;
5134         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5135         int ret = 0;
5136         bool delalloc_lock = true;
5137         u64 to_free = 0;
5138         unsigned dropped;
5139
5140         /* If we are a free space inode we need to not flush since we will be in
5141          * the middle of a transaction commit.  We also don't need the delalloc
5142          * mutex since we won't race with anybody.  We need this mostly to make
5143          * lockdep shut its filthy mouth.
5144          */
5145         if (btrfs_is_free_space_inode(inode)) {
5146                 flush = BTRFS_RESERVE_NO_FLUSH;
5147                 delalloc_lock = false;
5148         }
5149
5150         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5151             btrfs_transaction_in_commit(root->fs_info))
5152                 schedule_timeout(1);
5153
5154         if (delalloc_lock)
5155                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5156
5157         num_bytes = ALIGN(num_bytes, root->sectorsize);
5158
5159         spin_lock(&BTRFS_I(inode)->lock);
5160         nr_extents = (unsigned)div64_u64(num_bytes +
5161                                          BTRFS_MAX_EXTENT_SIZE - 1,
5162                                          BTRFS_MAX_EXTENT_SIZE);
5163         BTRFS_I(inode)->outstanding_extents += nr_extents;
5164         nr_extents = 0;
5165
5166         if (BTRFS_I(inode)->outstanding_extents >
5167             BTRFS_I(inode)->reserved_extents)
5168                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5169                         BTRFS_I(inode)->reserved_extents;
5170
5171         /*
5172          * Add an item to reserve for updating the inode when we complete the
5173          * delalloc io.
5174          */
5175         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5176                       &BTRFS_I(inode)->runtime_flags)) {
5177                 nr_extents++;
5178                 extra_reserve = 1;
5179         }
5180
5181         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5182         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5183         csum_bytes = BTRFS_I(inode)->csum_bytes;
5184         spin_unlock(&BTRFS_I(inode)->lock);
5185
5186         if (root->fs_info->quota_enabled) {
5187                 ret = btrfs_qgroup_reserve(root, num_bytes +
5188                                            nr_extents * root->nodesize);
5189                 if (ret)
5190                         goto out_fail;
5191         }
5192
5193         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5194         if (unlikely(ret)) {
5195                 if (root->fs_info->quota_enabled)
5196                         btrfs_qgroup_free(root, num_bytes +
5197                                                 nr_extents * root->nodesize);
5198                 goto out_fail;
5199         }
5200
5201         spin_lock(&BTRFS_I(inode)->lock);
5202         if (extra_reserve) {
5203                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5204                         &BTRFS_I(inode)->runtime_flags);
5205                 nr_extents--;
5206         }
5207         BTRFS_I(inode)->reserved_extents += nr_extents;
5208         spin_unlock(&BTRFS_I(inode)->lock);
5209
5210         if (delalloc_lock)
5211                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5212
5213         if (to_reserve)
5214                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5215                                               btrfs_ino(inode), to_reserve, 1);
5216         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5217
5218         return 0;
5219
5220 out_fail:
5221         spin_lock(&BTRFS_I(inode)->lock);
5222         dropped = drop_outstanding_extent(inode, num_bytes);
5223         /*
5224          * If the inode's csum_bytes is the same as the original
5225          * csum_bytes then we know we haven't raced with any free()ers
5226          * so we can just reduce our inode's csum bytes and carry on.
5227          */
5228         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5229                 calc_csum_metadata_size(inode, num_bytes, 0);
5230         } else {
5231                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5232                 u64 bytes;
5233
5234                 /*
5235                  * This is tricky, but first we need to figure out how much we
5236                  * freed by any free-ers that occurred during this
5237                  * reservation, so we reset ->csum_bytes to the csum_bytes
5238                  * before we dropped our lock, and then call the free for the
5239                  * number of bytes that were freed while we were trying our
5240                  * reservation.
5241                  */
5242                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5243                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5244                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5245
5247                 /*
5248                  * Now we need to see how much we would have freed had we not
5249                  * been making this reservation and our ->csum_bytes were not
5250                  * artificially inflated.
5251                  */
5252                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5253                 bytes = csum_bytes - orig_csum_bytes;
5254                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5255
5256                 /*
5257                  * Now reset ->csum_bytes to what it should be.  If bytes is
5258                  * more than to_free then we would have free'd more space had we
5259                  * not had an artificially high ->csum_bytes, so we need to free
5260                  * the remainder.  If bytes is the same or less then we don't
5261                  * need to do anything, the other free-ers did the correct
5262                  * thing.
5263                  */
5264                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5265                 if (bytes > to_free)
5266                         to_free = bytes - to_free;
5267                 else
5268                         to_free = 0;
5269         }
5270         spin_unlock(&BTRFS_I(inode)->lock);
5271         if (dropped)
5272                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5273
5274         if (to_free) {
5275                 btrfs_block_rsv_release(root, block_rsv, to_free);
5276                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5277                                               btrfs_ino(inode), to_free, 0);
5278         }
5279         if (delalloc_lock)
5280                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5281         return ret;
5282 }
5283
5284 /**
5285  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5286  * @inode: the inode to release the reservation for
5287  * @num_bytes: the number of bytes we're releasing
5288  *
5289  * This will release the metadata reservation for an inode.  This can be called
5290  * once we complete IO for a given set of bytes to release their metadata
5291  * reservations.
5292  */
5293 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5294 {
5295         struct btrfs_root *root = BTRFS_I(inode)->root;
5296         u64 to_free = 0;
5297         unsigned dropped;
5298
5299         num_bytes = ALIGN(num_bytes, root->sectorsize);
5300         spin_lock(&BTRFS_I(inode)->lock);
5301         dropped = drop_outstanding_extent(inode, num_bytes);
5302
5303         if (num_bytes)
5304                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5305         spin_unlock(&BTRFS_I(inode)->lock);
5306         if (dropped > 0)
5307                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5308
5309         if (btrfs_test_is_dummy_root(root))
5310                 return;
5311
5312         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5313                                       btrfs_ino(inode), to_free, 0);
5314         if (root->fs_info->quota_enabled) {
5315                 btrfs_qgroup_free(root, num_bytes +
5316                                         dropped * root->nodesize);
5317         }
5318
5319         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5320                                 to_free);
5321 }
5322
5323 /**
5324  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5325  * @inode: inode we're writing to
5326  * @num_bytes: the number of bytes we want to allocate
5327  *
5328  * This will do the following things
5329  *
5330  * o reserve space in the data space info for num_bytes
5331  * o reserve space in the metadata space info based on number of outstanding
5332  *   extents and how much csums will be needed
5333  * o add to the inodes ->delalloc_bytes
5334  * o add it to the fs_info's delalloc inodes list.
5335  *
5336  * This will return 0 for success and -ENOSPC if there is no space left.
5337  */
5338 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5339 {
5340         int ret;
5341
5342         ret = btrfs_check_data_free_space(inode, num_bytes);
5343         if (ret)
5344                 return ret;
5345
5346         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5347         if (ret) {
5348                 btrfs_free_reserved_data_space(inode, num_bytes);
5349                 return ret;
5350         }
5351
5352         return 0;
5353 }
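/*
 * Editor's sketch (illustration only) of the reserve/release pairing;
 * the real callers are the buffered write paths in file.c and inode.c.
 */
#if 0	/* editor-added illustration */
static int example_buffered_write_reservation(struct inode *inode, u64 bytes)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, bytes);
	if (ret)
		return ret;

	/*
	 * Dirty the pages here.  If we bail out before the delalloc IO
	 * is set up, both reservations must be handed back.
	 */
	btrfs_delalloc_release_space(inode, bytes);
	return 0;
}
#endif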
5354
5355 /**
5356  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5357  * @inode: inode we're releasing space for
5358  * @num_bytes: the number of bytes we want to free up
5359  *
5360  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5361  * called in the case that we don't need the metadata AND data reservations
5362  * anymore, e.g. if there is an error or we insert an inline extent.
5363  *
5364  * This function will release the metadata space that was not used and will
5365  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5366  * list if there are no delalloc bytes left.
5367  */
5368 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5369 {
5370         btrfs_delalloc_release_metadata(inode, num_bytes);
5371         btrfs_free_reserved_data_space(inode, num_bytes);
5372 }
5373
5374 static int update_block_group(struct btrfs_trans_handle *trans,
5375                               struct btrfs_root *root, u64 bytenr,
5376                               u64 num_bytes, int alloc)
5377 {
5378         struct btrfs_block_group_cache *cache = NULL;
5379         struct btrfs_fs_info *info = root->fs_info;
5380         u64 total = num_bytes;
5381         u64 old_val;
5382         u64 byte_in_group;
5383         int factor;
5384
5385         /* block accounting for super block */
5386         spin_lock(&info->delalloc_root_lock);
5387         old_val = btrfs_super_bytes_used(info->super_copy);
5388         if (alloc)
5389                 old_val += num_bytes;
5390         else
5391                 old_val -= num_bytes;
5392         btrfs_set_super_bytes_used(info->super_copy, old_val);
5393         spin_unlock(&info->delalloc_root_lock);
5394
5395         while (total) {
5396                 cache = btrfs_lookup_block_group(info, bytenr);
5397                 if (!cache)
5398                         return -ENOENT;
5399                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5400                                     BTRFS_BLOCK_GROUP_RAID1 |
5401                                     BTRFS_BLOCK_GROUP_RAID10))
5402                         factor = 2;
5403                 else
5404                         factor = 1;
5405                 /*
5406                  * If this block group has free space cache written out, we
5407                  * need to make sure to load it if we are removing space.  This
5408                  * is because we need the unpinning stage to actually add the
5409                  * space back to the block group, otherwise we will leak space.
5410                  */
5411                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5412                         cache_block_group(cache, 1);
5413
5414                 spin_lock(&trans->transaction->dirty_bgs_lock);
5415                 if (list_empty(&cache->dirty_list)) {
5416                         list_add_tail(&cache->dirty_list,
5417                                       &trans->transaction->dirty_bgs);
5418                         trans->transaction->num_dirty_bgs++;
5419                         btrfs_get_block_group(cache);
5420                 }
5421                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5422
5423                 byte_in_group = bytenr - cache->key.objectid;
5424                 WARN_ON(byte_in_group > cache->key.offset);
5425
5426                 spin_lock(&cache->space_info->lock);
5427                 spin_lock(&cache->lock);
5428
5429                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5430                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5431                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5432
5433                 old_val = btrfs_block_group_used(&cache->item);
5434                 num_bytes = min(total, cache->key.offset - byte_in_group);
5435                 if (alloc) {
5436                         old_val += num_bytes;
5437                         btrfs_set_block_group_used(&cache->item, old_val);
5438                         cache->reserved -= num_bytes;
5439                         cache->space_info->bytes_reserved -= num_bytes;
5440                         cache->space_info->bytes_used += num_bytes;
5441                         cache->space_info->disk_used += num_bytes * factor;
5442                         spin_unlock(&cache->lock);
5443                         spin_unlock(&cache->space_info->lock);
5444                 } else {
5445                         old_val -= num_bytes;
5446                         btrfs_set_block_group_used(&cache->item, old_val);
5447                         cache->pinned += num_bytes;
5448                         cache->space_info->bytes_pinned += num_bytes;
5449                         cache->space_info->bytes_used -= num_bytes;
5450                         cache->space_info->disk_used -= num_bytes * factor;
5451                         spin_unlock(&cache->lock);
5452                         spin_unlock(&cache->space_info->lock);
5453
5454                         set_extent_dirty(info->pinned_extents,
5455                                          bytenr, bytenr + num_bytes - 1,
5456                                          GFP_NOFS | __GFP_NOFAIL);
5457                         /*
5458                          * No longer have used bytes in this block group, queue
5459                          * it for deletion.
5460                          */
5461                         if (old_val == 0) {
5462                                 spin_lock(&info->unused_bgs_lock);
5463                                 if (list_empty(&cache->bg_list)) {
5464                                         btrfs_get_block_group(cache);
5465                                         list_add_tail(&cache->bg_list,
5466                                                       &info->unused_bgs);
5467                                 }
5468                                 spin_unlock(&info->unused_bgs_lock);
5469                         }
5470                 }
5471                 btrfs_put_block_group(cache);
5472                 total -= num_bytes;
5473                 bytenr += num_bytes;
5474         }
5475         return 0;
5476 }
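/*
 * Worked example (illustrative, not from the original file): allocating
 * 1 MiB from a RAID1 block group above moves bytes_used by 1 MiB but
 * disk_used by 2 MiB, since factor is 2 for DUP/RAID1/RAID10.  A minimal
 * helper expressing that mapping, under the same assumptions:
 */
static inline u64 example_disk_bytes(u64 bg_flags, u64 num_bytes)
{
        int factor;

        /* mirrored profiles consume twice the raw disk space */
        if (bg_flags & (BTRFS_BLOCK_GROUP_DUP |
                        BTRFS_BLOCK_GROUP_RAID1 |
                        BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        return num_bytes * factor;
}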
5477
5478 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5479 {
5480         struct btrfs_block_group_cache *cache;
5481         u64 bytenr;
5482
5483         spin_lock(&root->fs_info->block_group_cache_lock);
5484         bytenr = root->fs_info->first_logical_byte;
5485         spin_unlock(&root->fs_info->block_group_cache_lock);
5486
5487         if (bytenr < (u64)-1)
5488                 return bytenr;
5489
5490         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5491         if (!cache)
5492                 return 0;
5493
5494         bytenr = cache->key.objectid;
5495         btrfs_put_block_group(cache);
5496
5497         return bytenr;
5498 }
5499
5500 static int pin_down_extent(struct btrfs_root *root,
5501                            struct btrfs_block_group_cache *cache,
5502                            u64 bytenr, u64 num_bytes, int reserved)
5503 {
5504         spin_lock(&cache->space_info->lock);
5505         spin_lock(&cache->lock);
5506         cache->pinned += num_bytes;
5507         cache->space_info->bytes_pinned += num_bytes;
5508         if (reserved) {
5509                 cache->reserved -= num_bytes;
5510                 cache->space_info->bytes_reserved -= num_bytes;
5511         }
5512         spin_unlock(&cache->lock);
5513         spin_unlock(&cache->space_info->lock);
5514
5515         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5516                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5517         if (reserved)
5518                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5519         return 0;
5520 }
5521
5522 /*
5523  * this function must be called within a transaction
5524  */
5525 int btrfs_pin_extent(struct btrfs_root *root,
5526                      u64 bytenr, u64 num_bytes, int reserved)
5527 {
5528         struct btrfs_block_group_cache *cache;
5529
5530         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5531         BUG_ON(!cache); /* Logic error */
5532
5533         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5534
5535         btrfs_put_block_group(cache);
5536         return 0;
5537 }
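/*
 * Illustrative call pattern (an assumption, not taken from this file):
 * the caller must already be inside a transaction, e.g.
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	if (!IS_ERR(trans)) {
 *		btrfs_pin_extent(root, bytenr, num_bytes, 1);
 *		btrfs_end_transaction(trans, root);
 *	}
 */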
5538
5539 /*
5540  * this function must be called within a transaction
5541  */
5542 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5543                                     u64 bytenr, u64 num_bytes)
5544 {
5545         struct btrfs_block_group_cache *cache;
5546         int ret;
5547
5548         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5549         if (!cache)
5550                 return -EINVAL;
5551
5552         /*
5553          * pull in the free space cache (if any) so that our pin
5554          * removes the free space from the cache.  We set load_only to
5555          * one because the slow path that reads in the free extents
5556          * already checks the pinned extents.
5557          */
5558         cache_block_group(cache, 1);
5559
5560         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5561
5562         /* remove us from the free space cache (if we're there at all) */
5563         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5564         btrfs_put_block_group(cache);
5565         return ret;
5566 }
5567
5568 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5569 {
5570         int ret;
5571         struct btrfs_block_group_cache *block_group;
5572         struct btrfs_caching_control *caching_ctl;
5573
5574         block_group = btrfs_lookup_block_group(root->fs_info, start);
5575         if (!block_group)
5576                 return -EINVAL;
5577
5578         cache_block_group(block_group, 0);
5579         caching_ctl = get_caching_control(block_group);
5580
5581         if (!caching_ctl) {
5582                 /* Logic error */
5583                 BUG_ON(!block_group_cache_done(block_group));
5584                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5585         } else {
5586                 mutex_lock(&caching_ctl->mutex);
5587
5588                 if (start >= caching_ctl->progress) {
5589                         ret = add_excluded_extent(root, start, num_bytes);
5590                 } else if (start + num_bytes <= caching_ctl->progress) {
5591                         ret = btrfs_remove_free_space(block_group,
5592                                                       start, num_bytes);
5593                 } else {
5594                         ret = btrfs_remove_free_space(block_group, start,
5595                                         caching_ctl->progress - start);
5596                         if (ret)
5597                                 goto out_lock;
5598
5599                         /* exclude the part the caching thread hasn't reached */
5600                         num_bytes = (start + num_bytes) -
5601                                 caching_ctl->progress;
5602                         start = caching_ctl->progress;
5603                         ret = add_excluded_extent(root, start, num_bytes);
5604                 }
5605 out_lock:
5606                 mutex_unlock(&caching_ctl->mutex);
5607                 put_caching_control(caching_ctl);
5608         }
5609         btrfs_put_block_group(block_group);
5610         return ret;
5611 }
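/*
 * Worked example (illustrative): if caching has progressed to byte 1000
 * and we must exclude the logged range [900, 1100), the last branch above
 * removes [900, 1000) from the free space cache and records [1000, 1100)
 * as excluded, so the caching thread skips it when it gets there.
 */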
5612
5613 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5614                                  struct extent_buffer *eb)
5615 {
5616         struct btrfs_file_extent_item *item;
5617         struct btrfs_key key;
5618         int found_type;
5619         int i;
5620
5621         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5622                 return 0;
5623
5624         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5625                 btrfs_item_key_to_cpu(eb, &key, i);
5626                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5627                         continue;
5628                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5629                 found_type = btrfs_file_extent_type(eb, item);
5630                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5631                         continue;
5632                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5633                         continue;
5634                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5635                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5636                 __exclude_logged_extent(log, key.objectid, key.offset);
5637         }
5638
5639         return 0;
5640 }
5641
5642 /**
5643  * btrfs_update_reserved_bytes - update the block_group and space info counters
5644  * @cache:      The cache we are manipulating
5645  * @num_bytes:  The number of bytes in question
5646  * @reserve:    One of the reservation enums
5647  * @delalloc:   The blocks are allocated for the delalloc write
5648  *
5649  * This is called by the allocator when it reserves space, or by somebody who is
5650  * freeing space that was never actually used on disk.  For example if you
5651  * reserve some space for a new leaf in transaction A and before transaction A
5652  * commits you free that leaf, you call this with RESERVE_FREE in order to
5653  * clear the reservation.
5654  *
5655  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5656  * ENOSPC accounting.  For data we handle the reservation through clearing the
5657  * delalloc bits in the io_tree.  We have to do this since we could end up
5658  * allocating less disk space for the amount of data we have reserved in the
5659  * case of compression.
5660  *
5661  * If this is a reservation and the block group has become read-only, we
5662  * cannot make the reservation and return -EAGAIN; otherwise this function
5663  * always succeeds.
5664  */
5665 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5666                                        u64 num_bytes, int reserve, int delalloc)
5667 {
5668         struct btrfs_space_info *space_info = cache->space_info;
5669         int ret = 0;
5670
5671         spin_lock(&space_info->lock);
5672         spin_lock(&cache->lock);
5673         if (reserve != RESERVE_FREE) {
5674                 if (cache->ro) {
5675                         ret = -EAGAIN;
5676                 } else {
5677                         cache->reserved += num_bytes;
5678                         space_info->bytes_reserved += num_bytes;
5679                         if (reserve == RESERVE_ALLOC) {
5680                                 trace_btrfs_space_reservation(cache->fs_info,
5681                                                 "space_info", space_info->flags,
5682                                                 num_bytes, 0);
5683                                 space_info->bytes_may_use -= num_bytes;
5684                         }
5685
5686                         if (delalloc)
5687                                 cache->delalloc_bytes += num_bytes;
5688                 }
5689         } else {
5690                 if (cache->ro)
5691                         space_info->bytes_readonly += num_bytes;
5692                 cache->reserved -= num_bytes;
5693                 space_info->bytes_reserved -= num_bytes;
5694
5695                 if (delalloc)
5696                         cache->delalloc_bytes -= num_bytes;
5697         }
5698         spin_unlock(&cache->lock);
5699         spin_unlock(&space_info->lock);
5700         return ret;
5701 }
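/*
 * Call-pattern sketch (an illustrative addition, not from the original
 * file): a metadata allocation and a subsequent early free would bracket
 * the space like this.  RESERVE_ALLOC moves the bytes from bytes_may_use
 * to bytes_reserved; RESERVE_FREE hands them back.
 */
static int example_reserve_then_free(struct btrfs_block_group_cache *cache,
                                     u64 len)
{
        int ret;

        ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0);
        if (ret == -EAGAIN)
                return ret;     /* the block group went read-only */

        /* ... decide before commit that the space isn't needed ... */

        btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
        return 0;
}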
5702
5703 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5704                                 struct btrfs_root *root)
5705 {
5706         struct btrfs_fs_info *fs_info = root->fs_info;
5707         struct btrfs_caching_control *next;
5708         struct btrfs_caching_control *caching_ctl;
5709         struct btrfs_block_group_cache *cache;
5710
5711         down_write(&fs_info->commit_root_sem);
5712
5713         list_for_each_entry_safe(caching_ctl, next,
5714                                  &fs_info->caching_block_groups, list) {
5715                 cache = caching_ctl->block_group;
5716                 if (block_group_cache_done(cache)) {
5717                         cache->last_byte_to_unpin = (u64)-1;
5718                         list_del_init(&caching_ctl->list);
5719                         put_caching_control(caching_ctl);
5720                 } else {
5721                         cache->last_byte_to_unpin = caching_ctl->progress;
5722                 }
5723         }
5724
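        /*
         * Flip which of the two freed_extents trees collects newly pinned
         * extents; the tree that filled during the committing transaction
         * is the one btrfs_finish_extent_commit() will drain.
         */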
5725         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5726                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5727         else
5728                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5729
5730         up_write(&fs_info->commit_root_sem);
5731
5732         update_global_block_rsv(fs_info);
5733 }
5734
5735 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
5736                               const bool return_free_space)
5737 {
5738         struct btrfs_fs_info *fs_info = root->fs_info;
5739         struct btrfs_block_group_cache *cache = NULL;
5740         struct btrfs_space_info *space_info;
5741         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5742         u64 len;
5743         bool readonly;
5744
5745         while (start <= end) {
5746                 readonly = false;
5747                 if (!cache ||
5748                     start >= cache->key.objectid + cache->key.offset) {
5749                         if (cache)
5750                                 btrfs_put_block_group(cache);
5751                         cache = btrfs_lookup_block_group(fs_info, start);
5752                         BUG_ON(!cache); /* Logic error */
5753                 }
5754
5755                 len = cache->key.objectid + cache->key.offset - start;
5756                 len = min(len, end + 1 - start);
5757
5758                 if (start < cache->last_byte_to_unpin) {
5759                         len = min(len, cache->last_byte_to_unpin - start);
5760                         if (return_free_space)
5761                                 btrfs_add_free_space(cache, start, len);
5762                 }
5763
5764                 start += len;
5765                 space_info = cache->space_info;
5766
5767                 spin_lock(&space_info->lock);
5768                 spin_lock(&cache->lock);
5769                 cache->pinned -= len;
5770                 space_info->bytes_pinned -= len;
5771                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5772                 if (cache->ro) {
5773                         space_info->bytes_readonly += len;
5774                         readonly = true;
5775                 }
5776                 spin_unlock(&cache->lock);
5777                 if (!readonly && global_rsv->space_info == space_info) {
5778                         spin_lock(&global_rsv->lock);
5779                         if (!global_rsv->full) {
5780                                 len = min(len, global_rsv->size -
5781                                           global_rsv->reserved);
5782                                 global_rsv->reserved += len;
5783                                 space_info->bytes_may_use += len;
5784                                 if (global_rsv->reserved >= global_rsv->size)
5785                                         global_rsv->full = 1;
5786                         }
5787                         spin_unlock(&global_rsv->lock);
5788                 }
5789                 spin_unlock(&space_info->lock);
5790         }
5791
5792         if (cache)
5793                 btrfs_put_block_group(cache);
5794         return 0;
5795 }
5796
5797 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5798                                struct btrfs_root *root)
5799 {
5800         struct btrfs_fs_info *fs_info = root->fs_info;
5801         struct extent_io_tree *unpin;
5802         u64 start;
5803         u64 end;
5804         int ret;
5805
5806         if (trans->aborted)
5807                 return 0;
5808
5809         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5810                 unpin = &fs_info->freed_extents[1];
5811         else
5812                 unpin = &fs_info->freed_extents[0];
5813
5814         while (1) {
5815                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
5816                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5817                                             EXTENT_DIRTY, NULL);
5818                 if (ret) {
5819                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5820                         break;
5821                 }
5822
5823                 if (btrfs_test_opt(root, DISCARD))
5824                         ret = btrfs_discard_extent(root, start,
5825                                                    end + 1 - start, NULL);
5826
5827                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5828                 unpin_extent_range(root, start, end, true);
5829                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
5830                 cond_resched();
5831         }
5832
5833         return 0;
5834 }
5835
5836 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5837                              u64 owner, u64 root_objectid)
5838 {
5839         struct btrfs_space_info *space_info;
5840         u64 flags;
5841
5842         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5843                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5844                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5845                 else
5846                         flags = BTRFS_BLOCK_GROUP_METADATA;
5847         } else {
5848                 flags = BTRFS_BLOCK_GROUP_DATA;
5849         }
5850
5851         space_info = __find_space_info(fs_info, flags);
5852         BUG_ON(!space_info); /* Logic bug */
5853         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5854 }
5855
5856
5857 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5858                                 struct btrfs_root *root,
5859                                 u64 bytenr, u64 num_bytes, u64 parent,
5860                                 u64 root_objectid, u64 owner_objectid,
5861                                 u64 owner_offset, int refs_to_drop,
5862                                 struct btrfs_delayed_extent_op *extent_op,
5863                                 int no_quota)
5864 {
5865         struct btrfs_key key;
5866         struct btrfs_path *path;
5867         struct btrfs_fs_info *info = root->fs_info;
5868         struct btrfs_root *extent_root = info->extent_root;
5869         struct extent_buffer *leaf;
5870         struct btrfs_extent_item *ei;
5871         struct btrfs_extent_inline_ref *iref;
5872         int ret;
5873         int is_data;
5874         int extent_slot = 0;
5875         int found_extent = 0;
5876         int num_to_del = 1;
5877         u32 item_size;
5878         u64 refs;
5879         int last_ref = 0;
5880         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5881         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5882                                                  SKINNY_METADATA);
5883
5884         if (!info->quota_enabled || !is_fstree(root_objectid))
5885                 no_quota = 1;
5886
5887         path = btrfs_alloc_path();
5888         if (!path)
5889                 return -ENOMEM;
5890
5891         path->reada = 1;
5892         path->leave_spinning = 1;
5893
5894         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5895         BUG_ON(!is_data && refs_to_drop != 1);
5896
5897         if (is_data)
5898                 skinny_metadata = 0;
5899
5900         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5901                                     bytenr, num_bytes, parent,
5902                                     root_objectid, owner_objectid,
5903                                     owner_offset);
5904         if (ret == 0) {
5905                 extent_slot = path->slots[0];
5906                 while (extent_slot >= 0) {
5907                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5908                                               extent_slot);
5909                         if (key.objectid != bytenr)
5910                                 break;
5911                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5912                             key.offset == num_bytes) {
5913                                 found_extent = 1;
5914                                 break;
5915                         }
5916                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5917                             key.offset == owner_objectid) {
5918                                 found_extent = 1;
5919                                 break;
5920                         }
5921                         if (path->slots[0] - extent_slot > 5)
5922                                 break;
5923                         extent_slot--;
5924                 }
5925 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5926                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5927                 if (found_extent && item_size < sizeof(*ei))
5928                         found_extent = 0;
5929 #endif
5930                 if (!found_extent) {
5931                         BUG_ON(iref);
5932                         ret = remove_extent_backref(trans, extent_root, path,
5933                                                     NULL, refs_to_drop,
5934                                                     is_data, &last_ref);
5935                         if (ret) {
5936                                 btrfs_abort_transaction(trans, extent_root, ret);
5937                                 goto out;
5938                         }
5939                         btrfs_release_path(path);
5940                         path->leave_spinning = 1;
5941
5942                         key.objectid = bytenr;
5943                         key.type = BTRFS_EXTENT_ITEM_KEY;
5944                         key.offset = num_bytes;
5945
5946                         if (!is_data && skinny_metadata) {
5947                                 key.type = BTRFS_METADATA_ITEM_KEY;
5948                                 key.offset = owner_objectid;
5949                         }
5950
5951                         ret = btrfs_search_slot(trans, extent_root,
5952                                                 &key, path, -1, 1);
5953                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5954                                 /*
5955                                  * Couldn't find our skinny metadata item,
5956                                  * see if we have ye olde extent item.
5957                                  */
5958                                 path->slots[0]--;
5959                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5960                                                       path->slots[0]);
5961                                 if (key.objectid == bytenr &&
5962                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5963                                     key.offset == num_bytes)
5964                                         ret = 0;
5965                         }
5966
5967                         if (ret > 0 && skinny_metadata) {
5968                                 skinny_metadata = false;
5969                                 key.objectid = bytenr;
5970                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5971                                 key.offset = num_bytes;
5972                                 btrfs_release_path(path);
5973                                 ret = btrfs_search_slot(trans, extent_root,
5974                                                         &key, path, -1, 1);
5975                         }
5976
5977                         if (ret) {
5978                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5979                                         ret, bytenr);
5980                                 if (ret > 0)
5981                                         btrfs_print_leaf(extent_root,
5982                                                          path->nodes[0]);
5983                         }
5984                         if (ret < 0) {
5985                                 btrfs_abort_transaction(trans, extent_root, ret);
5986                                 goto out;
5987                         }
5988                         extent_slot = path->slots[0];
5989                 }
5990         } else if (WARN_ON(ret == -ENOENT)) {
5991                 btrfs_print_leaf(extent_root, path->nodes[0]);
5992                 btrfs_err(info,
5993                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5994                         bytenr, parent, root_objectid, owner_objectid,
5995                         owner_offset);
5996                 btrfs_abort_transaction(trans, extent_root, ret);
5997                 goto out;
5998         } else {
5999                 btrfs_abort_transaction(trans, extent_root, ret);
6000                 goto out;
6001         }
6002
6003         leaf = path->nodes[0];
6004         item_size = btrfs_item_size_nr(leaf, extent_slot);
6005 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6006         if (item_size < sizeof(*ei)) {
6007                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6008                 ret = convert_extent_item_v0(trans, extent_root, path,
6009                                              owner_objectid, 0);
6010                 if (ret < 0) {
6011                         btrfs_abort_transaction(trans, extent_root, ret);
6012                         goto out;
6013                 }
6014
6015                 btrfs_release_path(path);
6016                 path->leave_spinning = 1;
6017
6018                 key.objectid = bytenr;
6019                 key.type = BTRFS_EXTENT_ITEM_KEY;
6020                 key.offset = num_bytes;
6021
6022                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6023                                         -1, 1);
6024                 if (ret) {
6025                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6026                                 ret, bytenr);
6027                         btrfs_print_leaf(extent_root, path->nodes[0]);
6028                 }
6029                 if (ret < 0) {
6030                         btrfs_abort_transaction(trans, extent_root, ret);
6031                         goto out;
6032                 }
6033
6034                 extent_slot = path->slots[0];
6035                 leaf = path->nodes[0];
6036                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6037         }
6038 #endif
6039         BUG_ON(item_size < sizeof(*ei));
6040         ei = btrfs_item_ptr(leaf, extent_slot,
6041                             struct btrfs_extent_item);
6042         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6043             key.type == BTRFS_EXTENT_ITEM_KEY) {
6044                 struct btrfs_tree_block_info *bi;
6045                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6046                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6047                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6048         }
6049
6050         refs = btrfs_extent_refs(leaf, ei);
6051         if (refs < refs_to_drop) {
6052                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6053                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6054                 ret = -EINVAL;
6055                 btrfs_abort_transaction(trans, extent_root, ret);
6056                 goto out;
6057         }
6058         refs -= refs_to_drop;
6059
6060         if (refs > 0) {
6061                 type = BTRFS_QGROUP_OPER_SUB_SHARED;
6062                 if (extent_op)
6063                         __run_delayed_extent_op(extent_op, leaf, ei);
6064                 /*
6065                  * In the case of inline back ref, reference count will
6066                  * be updated by remove_extent_backref
6067                  */
6068                 if (iref) {
6069                         BUG_ON(!found_extent);
6070                 } else {
6071                         btrfs_set_extent_refs(leaf, ei, refs);
6072                         btrfs_mark_buffer_dirty(leaf);
6073                 }
6074                 if (found_extent) {
6075                         ret = remove_extent_backref(trans, extent_root, path,
6076                                                     iref, refs_to_drop,
6077                                                     is_data, &last_ref);
6078                         if (ret) {
6079                                 btrfs_abort_transaction(trans, extent_root, ret);
6080                                 goto out;
6081                         }
6082                 }
6083                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6084                                  root_objectid);
6085         } else {
6086                 if (found_extent) {
6087                         BUG_ON(is_data && refs_to_drop !=
6088                                extent_data_ref_count(root, path, iref));
6089                         if (iref) {
6090                                 BUG_ON(path->slots[0] != extent_slot);
6091                         } else {
6092                                 BUG_ON(path->slots[0] != extent_slot + 1);
6093                                 path->slots[0] = extent_slot;
6094                                 num_to_del = 2;
6095                         }
6096                 }
6097
6098                 last_ref = 1;
6099                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6100                                       num_to_del);
6101                 if (ret) {
6102                         btrfs_abort_transaction(trans, extent_root, ret);
6103                         goto out;
6104                 }
6105                 btrfs_release_path(path);
6106
6107                 if (is_data) {
6108                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6109                         if (ret) {
6110                                 btrfs_abort_transaction(trans, extent_root, ret);
6111                                 goto out;
6112                         }
6113                 }
6114
6115                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6116                 if (ret) {
6117                         btrfs_abort_transaction(trans, extent_root, ret);
6118                         goto out;
6119                 }
6120         }
6121         btrfs_release_path(path);
6122
6123         /* Deal with the quota accounting */
6124         if (!ret && last_ref && !no_quota) {
6125                 int mod_seq = 0;
6126
6127                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6128                     type == BTRFS_QGROUP_OPER_SUB_SHARED)
6129                         mod_seq = 1;
6130
6131                 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6132                                               bytenr, num_bytes, type,
6133                                               mod_seq);
6134         }
6135 out:
6136         btrfs_free_path(path);
6137         return ret;
6138 }
6139
6140 /*
6141  * when we free a block, it is possible (and likely) that we free the last
6142  * delayed ref for that extent as well.  This searches the delayed ref tree for
6143  * a given extent, and if there are no other delayed refs to be processed, it
6144  * removes it from the tree.
6145  */
6146 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6147                                       struct btrfs_root *root, u64 bytenr)
6148 {
6149         struct btrfs_delayed_ref_head *head;
6150         struct btrfs_delayed_ref_root *delayed_refs;
6151         int ret = 0;
6152
6153         delayed_refs = &trans->transaction->delayed_refs;
6154         spin_lock(&delayed_refs->lock);
6155         head = btrfs_find_delayed_ref_head(trans, bytenr);
6156         if (!head)
6157                 goto out_delayed_unlock;
6158
6159         spin_lock(&head->lock);
6160         if (rb_first(&head->ref_root))
6161                 goto out;
6162
6163         if (head->extent_op) {
6164                 if (!head->must_insert_reserved)
6165                         goto out;
6166                 btrfs_free_delayed_extent_op(head->extent_op);
6167                 head->extent_op = NULL;
6168         }
6169
6170         /*
6171          * waiting for the lock here would deadlock.  If someone else has it
6172          * locked, they are already in the process of dropping it anyway
6173          */
6174         if (!mutex_trylock(&head->mutex))
6175                 goto out;
6176
6177         /*
6178          * at this point we have a head with no other entries.  Go
6179          * ahead and process it.
6180          */
6181         head->node.in_tree = 0;
6182         rb_erase(&head->href_node, &delayed_refs->href_root);
6183
6184         atomic_dec(&delayed_refs->num_entries);
6185
6186         /*
6187          * we don't take a ref on the node because we're removing it from the
6188          * tree, so we just steal the ref the tree was holding.
6189          */
6190         delayed_refs->num_heads--;
6191         if (head->processing == 0)
6192                 delayed_refs->num_heads_ready--;
6193         head->processing = 0;
6194         spin_unlock(&head->lock);
6195         spin_unlock(&delayed_refs->lock);
6196
6197         BUG_ON(head->extent_op);
6198         if (head->must_insert_reserved)
6199                 ret = 1;
6200
6201         mutex_unlock(&head->mutex);
6202         btrfs_put_delayed_ref(&head->node);
6203         return ret;
6204 out:
6205         spin_unlock(&head->lock);
6206
6207 out_delayed_unlock:
6208         spin_unlock(&delayed_refs->lock);
6209         return 0;
6210 }
6211
6212 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6213                            struct btrfs_root *root,
6214                            struct extent_buffer *buf,
6215                            u64 parent, int last_ref)
6216 {
6217         int pin = 1;
6218         int ret;
6219
6220         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6221                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6222                                         buf->start, buf->len,
6223                                         parent, root->root_key.objectid,
6224                                         btrfs_header_level(buf),
6225                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6226                 BUG_ON(ret); /* -ENOMEM */
6227         }
6228
6229         if (!last_ref)
6230                 return;
6231
6232         if (btrfs_header_generation(buf) == trans->transid) {
6233                 struct btrfs_block_group_cache *cache;
6234
6235                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6236                         ret = check_ref_cleanup(trans, root, buf->start);
6237                         if (!ret)
6238                                 goto out;
6239                 }
6240
6241                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6242
6243                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6244                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6245                         btrfs_put_block_group(cache);
6246                         goto out;
6247                 }
6248
6249                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6250
6251                 btrfs_add_free_space(cache, buf->start, buf->len);
6252                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6253                 btrfs_put_block_group(cache);
6254                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6255                 pin = 0;
6256         }
6257 out:
6258         if (pin)
6259                 add_pinned_bytes(root->fs_info, buf->len,
6260                                  btrfs_header_level(buf),
6261                                  root->root_key.objectid);
6262
6263         /*
6264          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6265          * anymore.
6266          */
6267         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6268 }
6269
6270 /* Can return -ENOMEM */
6271 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6272                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6273                       u64 owner, u64 offset, int no_quota)
6274 {
6275         int ret;
6276         struct btrfs_fs_info *fs_info = root->fs_info;
6277
6278         if (btrfs_test_is_dummy_root(root))
6279                 return 0;
6280
6281         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6282
6283         /*
6284          * tree log blocks never actually go into the extent allocation
6285          * tree, just update pinning info and exit early.
6286          */
6287         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6288                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6289                 /* unlocks the pinned mutex */
6290                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6291                 ret = 0;
6292         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6293                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6294                                         num_bytes,
6295                                         parent, root_objectid, (int)owner,
6296                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6297         } else {
6298                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6299                                                 num_bytes,
6300                                                 parent, root_objectid, owner,
6301                                                 offset, BTRFS_DROP_DELAYED_REF,
6302                                                 NULL, no_quota);
6303         }
6304         return ret;
6305 }
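/*
 * Example (illustrative, not from the original file): dropping one
 * reference to a data extent held by a file tree would look roughly like
 *
 *	btrfs_free_extent(trans, root, disk_bytenr, disk_num_bytes,
 *			  0, root->root_key.objectid, btrfs_ino(inode),
 *			  file_offset, 0);
 *
 * which queues a BTRFS_DROP_DELAYED_REF instead of touching the extent
 * tree directly.
 */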
6306
6307 /*
6308  * when we wait for progress in the block group caching, it's because
6309  * our allocation attempt failed at least once.  So, we must sleep
6310  * and let some progress happen before we try again.
6311  *
6312  * This function will sleep at least once waiting for new free space to
6313  * show up, and then it will check the block group free space numbers
6314  * for our min num_bytes.  Another option is to have it go ahead
6315  * and look in the rbtree for a free extent of a given size, but this
6316  * is a good start.
6317  *
6318  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6319  * any of the information in this block group.
6320  */
6321 static noinline void
6322 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6323                                 u64 num_bytes)
6324 {
6325         struct btrfs_caching_control *caching_ctl;
6326
6327         caching_ctl = get_caching_control(cache);
6328         if (!caching_ctl)
6329                 return;
6330
6331         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6332                    (cache->free_space_ctl->free_space >= num_bytes));
6333
6334         put_caching_control(caching_ctl);
6335 }
6336
6337 static noinline int
6338 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6339 {
6340         struct btrfs_caching_control *caching_ctl;
6341         int ret = 0;
6342
6343         caching_ctl = get_caching_control(cache);
6344         if (!caching_ctl)
6345                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6346
6347         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6348         if (cache->cached == BTRFS_CACHE_ERROR)
6349                 ret = -EIO;
6350         put_caching_control(caching_ctl);
6351         return ret;
6352 }
6353
6354 int __get_raid_index(u64 flags)
6355 {
6356         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6357                 return BTRFS_RAID_RAID10;
6358         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6359                 return BTRFS_RAID_RAID1;
6360         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6361                 return BTRFS_RAID_DUP;
6362         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6363                 return BTRFS_RAID_RAID0;
6364         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6365                 return BTRFS_RAID_RAID5;
6366         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6367                 return BTRFS_RAID_RAID6;
6368
6369         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6370 }
6371
6372 int get_block_group_index(struct btrfs_block_group_cache *cache)
6373 {
6374         return __get_raid_index(cache->flags);
6375 }
6376
6377 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6378         [BTRFS_RAID_RAID10]     = "raid10",
6379         [BTRFS_RAID_RAID1]      = "raid1",
6380         [BTRFS_RAID_DUP]        = "dup",
6381         [BTRFS_RAID_RAID0]      = "raid0",
6382         [BTRFS_RAID_SINGLE]     = "single",
6383         [BTRFS_RAID_RAID5]      = "raid5",
6384         [BTRFS_RAID_RAID6]      = "raid6",
6385 };
6386
6387 static const char *get_raid_name(enum btrfs_raid_types type)
6388 {
6389         if (type >= BTRFS_NR_RAID_TYPES)
6390                 return NULL;
6391
6392         return btrfs_raid_type_names[type];
6393 }
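/*
 * Example (an illustrative addition): turn a block group's flags into a
 * printable profile name.  get_raid_name() only returns NULL for an
 * out-of-range index, which __get_raid_index() never produces.
 */
static inline const char *example_profile_name(u64 flags)
{
        return get_raid_name(__get_raid_index(flags));
}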
6394
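/*
 * How hard find_free_extent() tries, in escalating order: scan only block
 * groups that are already cached, then wait for caching, then allocate a
 * new chunk, and finally retry without the empty_size/empty_cluster
 * padding.
 */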
6395 enum btrfs_loop_type {
6396         LOOP_CACHING_NOWAIT = 0,
6397         LOOP_CACHING_WAIT = 1,
6398         LOOP_ALLOC_CHUNK = 2,
6399         LOOP_NO_EMPTY_SIZE = 3,
6400 };
6401
6402 static inline void
6403 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6404                        int delalloc)
6405 {
6406         if (delalloc)
6407                 down_read(&cache->data_rwsem);
6408 }
6409
6410 static inline void
6411 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6412                        int delalloc)
6413 {
6414         btrfs_get_block_group(cache);
6415         if (delalloc)
6416                 down_read(&cache->data_rwsem);
6417 }
6418
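/*
 * Grab the cluster's current block group, taking an extra reference (and,
 * for delalloc, the group's data_rwsem) when it differs from the block
 * group the caller already holds.  Acquiring data_rwsem can sleep, so the
 * refill_lock is dropped for that step and the cluster is re-checked
 * afterwards; the function returns with cluster->refill_lock held.
 */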
6419 static struct btrfs_block_group_cache *
6420 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6421                    struct btrfs_free_cluster *cluster,
6422                    int delalloc)
6423 {
6424         struct btrfs_block_group_cache *used_bg;
6425         bool locked = false;
6426 again:
6427         spin_lock(&cluster->refill_lock);
6428         if (locked) {
6429                 if (used_bg == cluster->block_group)
6430                         return used_bg;
6431
6432                 up_read(&used_bg->data_rwsem);
6433                 btrfs_put_block_group(used_bg);
6434         }
6435
6436         used_bg = cluster->block_group;
6437         if (!used_bg)
6438                 return NULL;
6439
6440         if (used_bg == block_group)
6441                 return used_bg;
6442
6443         btrfs_get_block_group(used_bg);
6444
6445         if (!delalloc)
6446                 return used_bg;
6447
6448         if (down_read_trylock(&used_bg->data_rwsem))
6449                 return used_bg;
6450
6451         spin_unlock(&cluster->refill_lock);
6452         down_read(&used_bg->data_rwsem);
6453         locked = true;
6454         goto again;
6455 }
6456
6457 static inline void
6458 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6459                          int delalloc)
6460 {
6461         if (delalloc)
6462                 up_read(&cache->data_rwsem);
6463         btrfs_put_block_group(cache);
6464 }
6465
6466 /*
6467  * walks the btree of allocated extents and finds a hole of a given size.
6468  * The key ins is changed to record the hole:
6469  * ins->objectid == start position
6470  * ins->type == BTRFS_EXTENT_ITEM_KEY
6471  * ins->offset == the size of the hole.
6472  * Any available blocks before search_start are skipped.
6473  *
6474  * If there is no suitable free space, we record the size of the largest
6475  * free space extent we encountered in max_extent_size.
6476  */
6477 static noinline int find_free_extent(struct btrfs_root *orig_root,
6478                                      u64 num_bytes, u64 empty_size,
6479                                      u64 hint_byte, struct btrfs_key *ins,
6480                                      u64 flags, int delalloc)
6481 {
6482         int ret = 0;
6483         struct btrfs_root *root = orig_root->fs_info->extent_root;
6484         struct btrfs_free_cluster *last_ptr = NULL;
6485         struct btrfs_block_group_cache *block_group = NULL;
6486         u64 search_start = 0;
6487         u64 max_extent_size = 0;
6488         int empty_cluster = 2 * 1024 * 1024;
6489         struct btrfs_space_info *space_info;
6490         int loop = 0;
6491         int index = __get_raid_index(flags);
6492         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6493                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6494         bool failed_cluster_refill = false;
6495         bool failed_alloc = false;
6496         bool use_cluster = true;
6497         bool have_caching_bg = false;
6498
6499         WARN_ON(num_bytes < root->sectorsize);
6500         ins->type = BTRFS_EXTENT_ITEM_KEY;
6501         ins->objectid = 0;
6502         ins->offset = 0;
6503
6504         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6505
6506         space_info = __find_space_info(root->fs_info, flags);
6507         if (!space_info) {
6508                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6509                 return -ENOSPC;
6510         }
6511
6512         /*
6513          * If the space info is for both data and metadata it means we have a
6514          * small filesystem and we can't use the clustering stuff.
6515          */
6516         if (btrfs_mixed_space_info(space_info))
6517                 use_cluster = false;
6518
6519         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6520                 last_ptr = &root->fs_info->meta_alloc_cluster;
6521                 if (!btrfs_test_opt(root, SSD))
6522                         empty_cluster = 64 * 1024;
6523         }
6524
6525         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6526             btrfs_test_opt(root, SSD)) {
6527                 last_ptr = &root->fs_info->data_alloc_cluster;
6528         }
6529
6530         if (last_ptr) {
6531                 spin_lock(&last_ptr->lock);
6532                 if (last_ptr->block_group)
6533                         hint_byte = last_ptr->window_start;
6534                 spin_unlock(&last_ptr->lock);
6535         }
6536
6537         search_start = max(search_start, first_logical_byte(root, 0));
6538         search_start = max(search_start, hint_byte);
6539
6540         if (!last_ptr)
6541                 empty_cluster = 0;
6542
6543         if (search_start == hint_byte) {
6544                 block_group = btrfs_lookup_block_group(root->fs_info,
6545                                                        search_start);
6546                 /*
6547                  * we don't want to use the block group if it doesn't match our
6548  * allocation bits, or if it's not cached.
6549                  *
6550                  * However if we are re-searching with an ideal block group
6551                  * picked out then we don't care that the block group is cached.
6552                  */
6553                 if (block_group && block_group_bits(block_group, flags) &&
6554                     block_group->cached != BTRFS_CACHE_NO) {
6555                         down_read(&space_info->groups_sem);
6556                         if (list_empty(&block_group->list) ||
6557                             block_group->ro) {
6558                                 /*
6559                                  * someone is removing this block group,
6560                                  * we can't jump into the have_block_group
6561                                  * target because our list pointers are not
6562                                  * valid
6563                                  */
6564                                 btrfs_put_block_group(block_group);
6565                                 up_read(&space_info->groups_sem);
6566                         } else {
6567                                 index = get_block_group_index(block_group);
6568                                 btrfs_lock_block_group(block_group, delalloc);
6569                                 goto have_block_group;
6570                         }
6571                 } else if (block_group) {
6572                         btrfs_put_block_group(block_group);
6573                 }
6574         }
6575 search:
6576         have_caching_bg = false;
6577         down_read(&space_info->groups_sem);
6578         list_for_each_entry(block_group, &space_info->block_groups[index],
6579                             list) {
6580                 u64 offset;
6581                 int cached;
6582
6583                 btrfs_grab_block_group(block_group, delalloc);
6584                 search_start = block_group->key.objectid;
6585
6586                 /*
6587                  * this can happen if we end up cycling through all the
6588                  * raid types, but we want to make sure we only allocate
6589                  * for the proper type.
6590                  */
6591                 if (!block_group_bits(block_group, flags)) {
6592                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6593                                     BTRFS_BLOCK_GROUP_RAID1 |
6594                                     BTRFS_BLOCK_GROUP_RAID5 |
6595                                     BTRFS_BLOCK_GROUP_RAID6 |
6596                                     BTRFS_BLOCK_GROUP_RAID10;
6597
6598                         /*
6599                          * if they asked for extra copies and this block group
6600                          * doesn't provide them, bail.  This does allow us to
6601                          * fill raid0 from raid1.
6602                          */
6603                         if ((flags & extra) && !(block_group->flags & extra))
6604                                 goto loop;
6605                 }
6606
6607 have_block_group:
6608                 cached = block_group_cache_done(block_group);
6609                 if (unlikely(!cached)) {
6610                         ret = cache_block_group(block_group, 0);
6611                         BUG_ON(ret < 0);
6612                         ret = 0;
6613                 }
6614
6615                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6616                         goto loop;
6617                 if (unlikely(block_group->ro))
6618                         goto loop;
6619
6620                 /*
6621                  * Ok we want to try and use the cluster allocator, so
6622                  * lets look there
6623                  */
6624                 if (last_ptr) {
6625                         struct btrfs_block_group_cache *used_block_group;
6626                         unsigned long aligned_cluster;
6627                         /*
6628                          * the refill lock keeps out other
6629                          * people trying to start a new cluster
6630                          */
6631                         used_block_group = btrfs_lock_cluster(block_group,
6632                                                               last_ptr,
6633                                                               delalloc);
6634                         if (!used_block_group)
6635                                 goto refill_cluster;
6636
6637                         if (used_block_group != block_group &&
6638                             (used_block_group->ro ||
6639                              !block_group_bits(used_block_group, flags)))
6640                                 goto release_cluster;
6641
6642                         offset = btrfs_alloc_from_cluster(used_block_group,
6643                                                 last_ptr,
6644                                                 num_bytes,
6645                                                 used_block_group->key.objectid,
6646                                                 &max_extent_size);
6647                         if (offset) {
6648                                 /* we have a block, we're done */
6649                                 spin_unlock(&last_ptr->refill_lock);
6650                                 trace_btrfs_reserve_extent_cluster(root,
6651                                                 used_block_group,
6652                                                 search_start, num_bytes);
6653                                 if (used_block_group != block_group) {
6654                                         btrfs_release_block_group(block_group,
6655                                                                   delalloc);
6656                                         block_group = used_block_group;
6657                                 }
6658                                 goto checks;
6659                         }
6660
6661                         WARN_ON(last_ptr->block_group != used_block_group);
6662 release_cluster:
6663                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6664                          * set up a new cluster, so let's just skip it
6665                          * and let the allocator find whatever block
6666                          * it can find.  If we reach this point, we
6667                          * will have tried the cluster allocator
6668                          * plenty of times and not have found
6669                          * anything, so we are likely way too
6670                          * fragmented for the clustering stuff to find
6671                          * anything.
6672                          *
6673                          * However, if the cluster is taken from the
6674                          * current block group, release the cluster
6675                          * first, so that we stand a better chance of
6676                          * succeeding in the unclustered
6677                          * allocation.  */
6678                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6679                             used_block_group != block_group) {
6680                                 spin_unlock(&last_ptr->refill_lock);
6681                                 btrfs_release_block_group(used_block_group,
6682                                                           delalloc);
6683                                 goto unclustered_alloc;
6684                         }
6685
6686                         /*
6687                          * this cluster didn't work out, free it and
6688                          * start over
6689                          */
6690                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6691
6692                         if (used_block_group != block_group)
6693                                 btrfs_release_block_group(used_block_group,
6694                                                           delalloc);
6695 refill_cluster:
6696                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6697                                 spin_unlock(&last_ptr->refill_lock);
6698                                 goto unclustered_alloc;
6699                         }
6700
6701                         aligned_cluster = max_t(unsigned long,
6702                                                 empty_cluster + empty_size,
6703                                               block_group->full_stripe_len);
6704
6705                         /* allocate a cluster in this block group */
6706                         ret = btrfs_find_space_cluster(root, block_group,
6707                                                        last_ptr, search_start,
6708                                                        num_bytes,
6709                                                        aligned_cluster);
6710                         if (ret == 0) {
6711                                 /*
6712                                  * now pull our allocation out of this
6713                                  * cluster
6714                                  */
6715                                 offset = btrfs_alloc_from_cluster(block_group,
6716                                                         last_ptr,
6717                                                         num_bytes,
6718                                                         search_start,
6719                                                         &max_extent_size);
6720                                 if (offset) {
6721                                         /* we found one, proceed */
6722                                         spin_unlock(&last_ptr->refill_lock);
6723                                         trace_btrfs_reserve_extent_cluster(root,
6724                                                 block_group, search_start,
6725                                                 num_bytes);
6726                                         goto checks;
6727                                 }
6728                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6729                                    && !failed_cluster_refill) {
6730                                 spin_unlock(&last_ptr->refill_lock);
6731
6732                                 failed_cluster_refill = true;
6733                                 wait_block_group_cache_progress(block_group,
6734                                        num_bytes + empty_cluster + empty_size);
6735                                 goto have_block_group;
6736                         }
6737
6738                         /*
6739                          * at this point we either didn't find a cluster
6740                          * or we weren't able to allocate a block from our
6741                          * cluster.  Free the cluster we've been trying
6742                          * to use, and go to the next block group
6743                          */
6744                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6745                         spin_unlock(&last_ptr->refill_lock);
6746                         goto loop;
6747                 }
6748
6749 unclustered_alloc:
6750                 spin_lock(&block_group->free_space_ctl->tree_lock);
6751                 if (cached &&
6752                     block_group->free_space_ctl->free_space <
6753                     num_bytes + empty_cluster + empty_size) {
6754                         if (block_group->free_space_ctl->free_space >
6755                             max_extent_size)
6756                                 max_extent_size =
6757                                         block_group->free_space_ctl->free_space;
6758                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6759                         goto loop;
6760                 }
6761                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6762
6763                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6764                                                     num_bytes, empty_size,
6765                                                     &max_extent_size);
6766                 /*
6767                  * If we didn't find a chunk, and we haven't failed on this
6768                  * block group before, and this block group is in the middle of
6769                  * caching and we are ok with waiting, then go ahead and wait
6770                  * for progress to be made, and set failed_alloc to true.
6771                  *
6772                  * If failed_alloc is true then we've already waited on this
6773                  * block group once and should move on to the next block group.
6774                  */
6775                 if (!offset && !failed_alloc && !cached &&
6776                     loop > LOOP_CACHING_NOWAIT) {
6777                         wait_block_group_cache_progress(block_group,
6778                                                 num_bytes + empty_size);
6779                         failed_alloc = true;
6780                         goto have_block_group;
6781                 } else if (!offset) {
6782                         if (!cached)
6783                                 have_caching_bg = true;
6784                         goto loop;
6785                 }
6786 checks:
6787                 search_start = ALIGN(offset, root->stripesize);
6788
6789                 /* move on to the next group */
6790                 if (search_start + num_bytes >
6791                     block_group->key.objectid + block_group->key.offset) {
6792                         btrfs_add_free_space(block_group, offset, num_bytes);
6793                         goto loop;
6794                 }
6795
6796                 if (offset < search_start)
6797                         btrfs_add_free_space(block_group, offset,
6798                                              search_start - offset);
6799                 BUG_ON(offset > search_start);
6800
6801                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6802                                                   alloc_type, delalloc);
6803                 if (ret == -EAGAIN) {
6804                         btrfs_add_free_space(block_group, offset, num_bytes);
6805                         goto loop;
6806                 }
6807
6808                 /* we are all good, let's return */
6809                 ins->objectid = search_start;
6810                 ins->offset = num_bytes;
6811
6812                 trace_btrfs_reserve_extent(orig_root, block_group,
6813                                            search_start, num_bytes);
6814                 btrfs_release_block_group(block_group, delalloc);
6815                 break;
6816 loop:
6817                 failed_cluster_refill = false;
6818                 failed_alloc = false;
6819                 BUG_ON(index != get_block_group_index(block_group));
6820                 btrfs_release_block_group(block_group, delalloc);
6821         }
6822         up_read(&space_info->groups_sem);
6823
6824         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6825                 goto search;
6826
6827         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6828                 goto search;
6829
6830         /*
6831          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6832          *                      caching kthreads as we move along
6833          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6834          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6835          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6836          *                      again
6837          */
6838         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6839                 index = 0;
6840                 loop++;
6841                 if (loop == LOOP_ALLOC_CHUNK) {
6842                         struct btrfs_trans_handle *trans;
6843                         int exist = 0;
6844
6845                         trans = current->journal_info;
6846                         if (trans)
6847                                 exist = 1;
6848                         else
6849                                 trans = btrfs_join_transaction(root);
6850
6851                         if (IS_ERR(trans)) {
6852                                 ret = PTR_ERR(trans);
6853                                 goto out;
6854                         }
6855
6856                         ret = do_chunk_alloc(trans, root, flags,
6857                                              CHUNK_ALLOC_FORCE);
6858                         /*
6859                          * Do not bail out on ENOSPC since the
6860                          * search may still succeed without a new chunk.
6861                          */
6862                         if (ret < 0 && ret != -ENOSPC)
6863                                 btrfs_abort_transaction(trans,
6864                                                         root, ret);
6865                         else
6866                                 ret = 0;
6867                         if (!exist)
6868                                 btrfs_end_transaction(trans, root);
6869                         if (ret)
6870                                 goto out;
6871                 }
6872
6873                 if (loop == LOOP_NO_EMPTY_SIZE) {
6874                         empty_size = 0;
6875                         empty_cluster = 0;
6876                 }
6877
6878                 goto search;
6879         } else if (!ins->objectid) {
6880                 ret = -ENOSPC;
6881         } else if (ins->objectid) {
6882                 ret = 0;
6883         }
6884 out:
6885         if (ret == -ENOSPC)
6886                 ins->offset = max_extent_size;
6887         return ret;
6888 }
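
     /*
      * A rough sketch of how the search above escalates when nothing
      * is found, mirroring the LOOP_* comment higher up:
      *
      *   LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT -> LOOP_ALLOC_CHUNK
      *                       -> LOOP_NO_EMPTY_SIZE -> -ENOSPC
      *
      * Each escalation restarts the search at index 0 across all raid
      * types; LOOP_ALLOC_CHUNK additionally forces a chunk allocation,
      * and LOOP_NO_EMPTY_SIZE retries once more with empty_size and
      * empty_cluster forced to 0 before giving up.
      */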
6889
6890 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6891                             int dump_block_groups)
6892 {
6893         struct btrfs_block_group_cache *cache;
6894         int index = 0;
6895
6896         spin_lock(&info->lock);
6897         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6898                info->flags,
6899                info->total_bytes - info->bytes_used - info->bytes_pinned -
6900                info->bytes_reserved - info->bytes_readonly,
6901                (info->full) ? "" : "not ");
6902         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6903                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6904                info->total_bytes, info->bytes_used, info->bytes_pinned,
6905                info->bytes_reserved, info->bytes_may_use,
6906                info->bytes_readonly);
6907         spin_unlock(&info->lock);
6908
6909         if (!dump_block_groups)
6910                 return;
6911
6912         down_read(&info->groups_sem);
6913 again:
6914         list_for_each_entry(cache, &info->block_groups[index], list) {
6915                 spin_lock(&cache->lock);
6916                 printk(KERN_INFO "BTRFS: "
6917                            "block group %llu has %llu bytes, "
6918                            "%llu used %llu pinned %llu reserved %s\n",
6919                        cache->key.objectid, cache->key.offset,
6920                        btrfs_block_group_used(&cache->item), cache->pinned,
6921                        cache->reserved, cache->ro ? "[readonly]" : "");
6922                 btrfs_dump_free_space(cache, bytes);
6923                 spin_unlock(&cache->lock);
6924         }
6925         if (++index < BTRFS_NR_RAID_TYPES)
6926                 goto again;
6927         up_read(&info->groups_sem);
6928 }
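
     /*
      * Illustrative output of the space_info dump above, with made-up
      * numbers:
      *
      *   BTRFS: space_info 4 has 8388608 free, is not full
      *   BTRFS: space_info total=1073741824, used=1065353216, pinned=0,
      *   reserved=0, may_use=0, readonly=0
      */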
6929
6930 int btrfs_reserve_extent(struct btrfs_root *root,
6931                          u64 num_bytes, u64 min_alloc_size,
6932                          u64 empty_size, u64 hint_byte,
6933                          struct btrfs_key *ins, int is_data, int delalloc)
6934 {
6935         bool final_tried = false;
6936         u64 flags;
6937         int ret;
6938
6939         flags = btrfs_get_alloc_profile(root, is_data);
6940 again:
6941         WARN_ON(num_bytes < root->sectorsize);
6942         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6943                                flags, delalloc);
6944
6945         if (ret == -ENOSPC) {
6946                 if (!final_tried && ins->offset) {
6947                         num_bytes = min(num_bytes >> 1, ins->offset);
6948                         num_bytes = round_down(num_bytes, root->sectorsize);
6949                         num_bytes = max(num_bytes, min_alloc_size);
6950                         if (num_bytes == min_alloc_size)
6951                                 final_tried = true;
6952                         goto again;
6953                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6954                         struct btrfs_space_info *sinfo;
6955
6956                         sinfo = __find_space_info(root->fs_info, flags);
6957                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6958                                 flags, num_bytes);
6959                         if (sinfo)
6960                                 dump_space_info(sinfo, num_bytes, 1);
6961                 }
6962         }
6963
6964         return ret;
6965 }
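
     /*
      * Illustrative example of the retry policy above: a caller asking
      * for num_bytes = 8MiB with min_alloc_size = 64KiB will, on each
      * -ENOSPC, retry with min(num_bytes >> 1, ins->offset) rounded
      * down to the sectorsize and clamped to at least 64KiB; once the
      * request has shrunk to min_alloc_size, the next failure is final.
      */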
6966
6967 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6968                                         u64 start, u64 len,
6969                                         int pin, int delalloc)
6970 {
6971         struct btrfs_block_group_cache *cache;
6972         int ret = 0;
6973
6974         cache = btrfs_lookup_block_group(root->fs_info, start);
6975         if (!cache) {
6976                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6977                         start);
6978                 return -ENOSPC;
6979         }
6980
6981         if (pin)
6982                 pin_down_extent(root, cache, start, len, 1);
6983         else {
6984                 if (btrfs_test_opt(root, DISCARD))
6985                         ret = btrfs_discard_extent(root, start, len, NULL);
6986                 btrfs_add_free_space(cache, start, len);
6987                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6988         }
6989         btrfs_put_block_group(cache);
6990
6991         trace_btrfs_reserved_extent_free(root, start, len);
6992
6993         return ret;
6994 }
6995
6996 int btrfs_free_reserved_extent(struct btrfs_root *root,
6997                                u64 start, u64 len, int delalloc)
6998 {
6999         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7000 }
7001
7002 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7003                                        u64 start, u64 len)
7004 {
7005         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7006 }
7007
7008 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7009                                       struct btrfs_root *root,
7010                                       u64 parent, u64 root_objectid,
7011                                       u64 flags, u64 owner, u64 offset,
7012                                       struct btrfs_key *ins, int ref_mod)
7013 {
7014         int ret;
7015         struct btrfs_fs_info *fs_info = root->fs_info;
7016         struct btrfs_extent_item *extent_item;
7017         struct btrfs_extent_inline_ref *iref;
7018         struct btrfs_path *path;
7019         struct extent_buffer *leaf;
7020         int type;
7021         u32 size;
7022
7023         if (parent > 0)
7024                 type = BTRFS_SHARED_DATA_REF_KEY;
7025         else
7026                 type = BTRFS_EXTENT_DATA_REF_KEY;
7027
7028         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7029
7030         path = btrfs_alloc_path();
7031         if (!path)
7032                 return -ENOMEM;
7033
7034         path->leave_spinning = 1;
7035         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7036                                       ins, size);
7037         if (ret) {
7038                 btrfs_free_path(path);
7039                 return ret;
7040         }
7041
7042         leaf = path->nodes[0];
7043         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7044                                      struct btrfs_extent_item);
7045         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7046         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7047         btrfs_set_extent_flags(leaf, extent_item,
7048                                flags | BTRFS_EXTENT_FLAG_DATA);
7049
7050         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7051         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7052         if (parent > 0) {
7053                 struct btrfs_shared_data_ref *ref;
7054                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7055                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7056                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7057         } else {
7058                 struct btrfs_extent_data_ref *ref;
7059                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7060                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7061                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7062                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7063                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7064         }
7065
7066         btrfs_mark_buffer_dirty(path->nodes[0]);
7067         btrfs_free_path(path);
7068
7069         /* Always set parent to 0 here since it's exclusive anyway. */
7070         ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7071                                       ins->objectid, ins->offset,
7072                                       BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7073         if (ret)
7074                 return ret;
7075
7076         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7077         if (ret) { /* -ENOENT, logic error */
7078                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7079                         ins->objectid, ins->offset);
7080                 BUG();
7081         }
7082         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7083         return ret;
7084 }
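
     /*
      * Rough on-disk layout of the item inserted above, inline in an
      * extent tree leaf:
      *
      *   [ btrfs_extent_item | btrfs_extent_inline_ref | ref body ]
      *
      * where the ref body is a btrfs_shared_data_ref when parent > 0
      * (BTRFS_SHARED_DATA_REF_KEY), and a btrfs_extent_data_ref
      * overlaid at iref->offset otherwise (BTRFS_EXTENT_DATA_REF_KEY).
      */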
7085
7086 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7087                                      struct btrfs_root *root,
7088                                      u64 parent, u64 root_objectid,
7089                                      u64 flags, struct btrfs_disk_key *key,
7090                                      int level, struct btrfs_key *ins,
7091                                      int no_quota)
7092 {
7093         int ret;
7094         struct btrfs_fs_info *fs_info = root->fs_info;
7095         struct btrfs_extent_item *extent_item;
7096         struct btrfs_tree_block_info *block_info;
7097         struct btrfs_extent_inline_ref *iref;
7098         struct btrfs_path *path;
7099         struct extent_buffer *leaf;
7100         u32 size = sizeof(*extent_item) + sizeof(*iref);
7101         u64 num_bytes = ins->offset;
7102         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7103                                                  SKINNY_METADATA);
7104
7105         if (!skinny_metadata)
7106                 size += sizeof(*block_info);
7107
7108         path = btrfs_alloc_path();
7109         if (!path) {
7110                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7111                                                    root->nodesize);
7112                 return -ENOMEM;
7113         }
7114
7115         path->leave_spinning = 1;
7116         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7117                                       ins, size);
7118         if (ret) {
7119                 btrfs_free_path(path);
7120                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7121                                                    root->nodesize);
7122                 return ret;
7123         }
7124
7125         leaf = path->nodes[0];
7126         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7127                                      struct btrfs_extent_item);
7128         btrfs_set_extent_refs(leaf, extent_item, 1);
7129         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7130         btrfs_set_extent_flags(leaf, extent_item,
7131                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7132
7133         if (skinny_metadata) {
7134                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7135                 num_bytes = root->nodesize;
7136         } else {
7137                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7138                 btrfs_set_tree_block_key(leaf, block_info, key);
7139                 btrfs_set_tree_block_level(leaf, block_info, level);
7140                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7141         }
7142
7143         if (parent > 0) {
7144                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7145                 btrfs_set_extent_inline_ref_type(leaf, iref,
7146                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7147                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7148         } else {
7149                 btrfs_set_extent_inline_ref_type(leaf, iref,
7150                                                  BTRFS_TREE_BLOCK_REF_KEY);
7151                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7152         }
7153
7154         btrfs_mark_buffer_dirty(leaf);
7155         btrfs_free_path(path);
7156
7157         if (!no_quota) {
7158                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7159                                               ins->objectid, num_bytes,
7160                                               BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7161                 if (ret)
7162                         return ret;
7163         }
7164
7165         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7166                                  1);
7167         if (ret) { /* -ENOENT, logic error */
7168                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7169                         ins->objectid, ins->offset);
7170                 BUG();
7171         }
7172
7173         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7174         return ret;
7175 }
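
     /*
      * Layout sketch for the two cases above: with the SKINNY_METADATA
      * incompat flag set, the inline ref directly follows the extent
      * item and no btrfs_tree_block_info is written; without it the
      * item is
      *
      *   [ btrfs_extent_item | btrfs_tree_block_info | inline ref ]
      *
      * with the disk key and level stored in the block_info.
      */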
7176
7177 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7178                                      struct btrfs_root *root,
7179                                      u64 root_objectid, u64 owner,
7180                                      u64 offset, struct btrfs_key *ins)
7181 {
7182         int ret;
7183
7184         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7185
7186         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7187                                          ins->offset, 0,
7188                                          root_objectid, owner, offset,
7189                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7190         return ret;
7191 }
7192
7193 /*
7194  * this is used by the tree logging recovery code.  It records that
7195  * an extent has been allocated and makes sure to clear the free
7196  * space cache bits as well
7197  */
7198 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7199                                    struct btrfs_root *root,
7200                                    u64 root_objectid, u64 owner, u64 offset,
7201                                    struct btrfs_key *ins)
7202 {
7203         int ret;
7204         struct btrfs_block_group_cache *block_group;
7205
7206         /*
7207          * Mixed block groups will exclude before processing the log so we only
7208          * need to do the exclude dance if this fs isn't mixed.
7209          */
7210         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7211                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7212                 if (ret)
7213                         return ret;
7214         }
7215
7216         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7217         if (!block_group)
7218                 return -EINVAL;
7219
7220         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7221                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7222         BUG_ON(ret); /* logic error */
7223         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7224                                          0, owner, offset, ins, 1);
7225         btrfs_put_block_group(block_group);
7226         return ret;
7227 }
7228
7229 static struct extent_buffer *
7230 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7231                       u64 bytenr, int level)
7232 {
7233         struct extent_buffer *buf;
7234
7235         buf = btrfs_find_create_tree_block(root, bytenr);
7236         if (!buf)
7237                 return ERR_PTR(-ENOMEM);
7238         btrfs_set_header_generation(buf, trans->transid);
7239         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7240         btrfs_tree_lock(buf);
7241         clean_tree_block(trans, root->fs_info, buf);
7242         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7243
7244         btrfs_set_lock_blocking(buf);
7245         btrfs_set_buffer_uptodate(buf);
7246
7247         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7248                 buf->log_index = root->log_transid % 2;
7249                 /*
7250                  * we allow two log transactions at a time; use different
7251                  * EXTENT bits to differentiate dirty pages.
7252                  */
7253                 if (buf->log_index == 0)
7254                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7255                                         buf->start + buf->len - 1, GFP_NOFS);
7256                 else
7257                         set_extent_new(&root->dirty_log_pages, buf->start,
7258                                         buf->start + buf->len - 1, GFP_NOFS);
7259         } else {
7260                 buf->log_index = -1;
7261                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7262                          buf->start + buf->len - 1, GFP_NOFS);
7263         }
7264         trans->blocks_used++;
7265         /* this returns a buffer locked for blocking */
7266         return buf;
7267 }
7268
7269 static struct btrfs_block_rsv *
7270 use_block_rsv(struct btrfs_trans_handle *trans,
7271               struct btrfs_root *root, u32 blocksize)
7272 {
7273         struct btrfs_block_rsv *block_rsv;
7274         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7275         int ret;
7276         bool global_updated = false;
7277
7278         block_rsv = get_block_rsv(trans, root);
7279
7280         if (unlikely(block_rsv->size == 0))
7281                 goto try_reserve;
7282 again:
7283         ret = block_rsv_use_bytes(block_rsv, blocksize);
7284         if (!ret)
7285                 return block_rsv;
7286
7287         if (block_rsv->failfast)
7288                 return ERR_PTR(ret);
7289
7290         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7291                 global_updated = true;
7292                 update_global_block_rsv(root->fs_info);
7293                 goto again;
7294         }
7295
7296         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7297                 static DEFINE_RATELIMIT_STATE(_rs,
7298                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7299                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7300                 if (__ratelimit(&_rs))
7301                         WARN(1, KERN_DEBUG
7302                                 "BTRFS: block rsv returned %d\n", ret);
7303         }
7304 try_reserve:
7305         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7306                                      BTRFS_RESERVE_NO_FLUSH);
7307         if (!ret)
7308                 return block_rsv;
7309         /*
7310          * If we couldn't reserve metadata bytes, try to use some from
7311          * the global reserve, provided our block rsv shares the global
7312          * reserve's space_info.
7313          */
7314         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7315             block_rsv->space_info == global_rsv->space_info) {
7316                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7317                 if (!ret)
7318                         return global_rsv;
7319         }
7320         return ERR_PTR(ret);
7321 }
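
     /*
      * Reservation fallback order implemented above, roughly:
      *
      *   1) take blocksize bytes straight from the root's block rsv
      *   2) if that rsv is the global one and may be stale, refresh it
      *      once and retry
      *   3) reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH
      *   4) as a last resort, steal from the global rsv when it shares
      *      our space_info
      */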
7322
7323 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7324                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7325 {
7326         block_rsv_add_bytes(block_rsv, blocksize, 0);
7327         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7328 }
7329
7330 /*
7331  * finds a free extent and does all the dirty work required for allocation.
7332  * returns the key for the extent through ins, and a locked tree buffer
7333  * for the first block of the extent.
7334  *
7335  * returns the tree buffer or an ERR_PTR on failure.
7336  */
7337 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7338                                         struct btrfs_root *root,
7339                                         u64 parent, u64 root_objectid,
7340                                         struct btrfs_disk_key *key, int level,
7341                                         u64 hint, u64 empty_size)
7342 {
7343         struct btrfs_key ins;
7344         struct btrfs_block_rsv *block_rsv;
7345         struct extent_buffer *buf;
7346         u64 flags = 0;
7347         int ret;
7348         u32 blocksize = root->nodesize;
7349         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7350                                                  SKINNY_METADATA);
7351
7352         if (btrfs_test_is_dummy_root(root)) {
7353                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7354                                             level);
7355                 if (!IS_ERR(buf))
7356                         root->alloc_bytenr += blocksize;
7357                 return buf;
7358         }
7359
7360         block_rsv = use_block_rsv(trans, root, blocksize);
7361         if (IS_ERR(block_rsv))
7362                 return ERR_CAST(block_rsv);
7363
7364         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7365                                    empty_size, hint, &ins, 0, 0);
7366         if (ret) {
7367                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7368                 return ERR_PTR(ret);
7369         }
7370
7371         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7372         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7373
7374         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7375                 if (parent == 0)
7376                         parent = ins.objectid;
7377                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7378         } else
7379                 BUG_ON(parent > 0);
7380
7381         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7382                 struct btrfs_delayed_extent_op *extent_op;
7383                 extent_op = btrfs_alloc_delayed_extent_op();
7384                 BUG_ON(!extent_op); /* -ENOMEM */
7385                 if (key)
7386                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7387                 else
7388                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7389                 extent_op->flags_to_set = flags;
7390                 if (skinny_metadata)
7391                         extent_op->update_key = 0;
7392                 else
7393                         extent_op->update_key = 1;
7394                 extent_op->update_flags = 1;
7395                 extent_op->is_data = 0;
7396                 extent_op->level = level;
7397
7398                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7399                                         ins.objectid,
7400                                         ins.offset, parent, root_objectid,
7401                                         level, BTRFS_ADD_DELAYED_EXTENT,
7402                                         extent_op, 0);
7403                 BUG_ON(ret); /* -ENOMEM */
7404         }
7405         return buf;
7406 }
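
     /*
      * Note on the path above: the new buffer's extent item is not
      * inserted here; instead a delayed tree ref is queued with an
      * extent_op carrying the key and flags, and the delayed-ref
      * machinery inserts it into the extent tree later (tree log
      * blocks, root_objectid == BTRFS_TREE_LOG_OBJECTID, skip this).
      */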
7407
7408 struct walk_control {
7409         u64 refs[BTRFS_MAX_LEVEL];
7410         u64 flags[BTRFS_MAX_LEVEL];
7411         struct btrfs_key update_progress;
7412         int stage;
7413         int level;
7414         int shared_level;
7415         int update_ref;
7416         int keep_locks;
7417         int reada_slot;
7418         int reada_count;
7419         int for_reloc;
7420 };
7421
7422 #define DROP_REFERENCE  1
7423 #define UPDATE_BACKREF  2
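
     /*
      * The walk below runs in two stages, roughly: DROP_REFERENCE
      * walks down dropping one reference per block, and switches a
      * subtree to UPDATE_BACKREF when it hits a shared block whose
      * back refs must be converted first; once that subtree is
      * finished, the walk returns to DROP_REFERENCE and resumes from
      * wc->update_progress.
      */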
7424
7425 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7426                                      struct btrfs_root *root,
7427                                      struct walk_control *wc,
7428                                      struct btrfs_path *path)
7429 {
7430         u64 bytenr;
7431         u64 generation;
7432         u64 refs;
7433         u64 flags;
7434         u32 nritems;
7435         u32 blocksize;
7436         struct btrfs_key key;
7437         struct extent_buffer *eb;
7438         int ret;
7439         int slot;
7440         int nread = 0;
7441
7442         if (path->slots[wc->level] < wc->reada_slot) {
7443                 wc->reada_count = wc->reada_count * 2 / 3;
7444                 wc->reada_count = max(wc->reada_count, 2);
7445         } else {
7446                 wc->reada_count = wc->reada_count * 3 / 2;
7447                 wc->reada_count = min_t(int, wc->reada_count,
7448                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7449         }
7450
7451         eb = path->nodes[wc->level];
7452         nritems = btrfs_header_nritems(eb);
7453         blocksize = root->nodesize;
7454
7455         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7456                 if (nread >= wc->reada_count)
7457                         break;
7458
7459                 cond_resched();
7460                 bytenr = btrfs_node_blockptr(eb, slot);
7461                 generation = btrfs_node_ptr_generation(eb, slot);
7462
7463                 if (slot == path->slots[wc->level])
7464                         goto reada;
7465
7466                 if (wc->stage == UPDATE_BACKREF &&
7467                     generation <= root->root_key.offset)
7468                         continue;
7469
7470                 /* We don't lock the tree block, it's OK to be racy here */
7471                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7472                                                wc->level - 1, 1, &refs,
7473                                                &flags);
7474                 /* We don't care about errors in readahead. */
7475                 if (ret < 0)
7476                         continue;
7477                 BUG_ON(refs == 0);
7478
7479                 if (wc->stage == DROP_REFERENCE) {
7480                         if (refs == 1)
7481                                 goto reada;
7482
7483                         if (wc->level == 1 &&
7484                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7485                                 continue;
7486                         if (!wc->update_ref ||
7487                             generation <= root->root_key.offset)
7488                                 continue;
7489                         btrfs_node_key_to_cpu(eb, &key, slot);
7490                         ret = btrfs_comp_cpu_keys(&key,
7491                                                   &wc->update_progress);
7492                         if (ret < 0)
7493                                 continue;
7494                 } else {
7495                         if (wc->level == 1 &&
7496                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7497                                 continue;
7498                 }
7499 reada:
7500                 readahead_tree_block(root, bytenr);
7501                 nread++;
7502         }
7503         wc->reada_slot = slot;
7504 }
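
     /*
      * The readahead window above adapts to how useful the last batch
      * was: if we are still at a slot below the previous reada_slot,
      * the count shrinks to 2/3 (floor 2); otherwise it grows by half,
      * capped at the number of node pointers per block.
      */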
7505
7506 static int account_leaf_items(struct btrfs_trans_handle *trans,
7507                               struct btrfs_root *root,
7508                               struct extent_buffer *eb)
7509 {
7510         int nr = btrfs_header_nritems(eb);
7511         int i, extent_type, ret;
7512         struct btrfs_key key;
7513         struct btrfs_file_extent_item *fi;
7514         u64 bytenr, num_bytes;
7515
7516         for (i = 0; i < nr; i++) {
7517                 btrfs_item_key_to_cpu(eb, &key, i);
7518
7519                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7520                         continue;
7521
7522                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7523                 /* filter out non-qgroup-accountable extents */
7524                 extent_type = btrfs_file_extent_type(eb, fi);
7525
7526                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7527                         continue;
7528
7529                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7530                 if (!bytenr)
7531                         continue;
7532
7533                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7534
7535                 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7536                                               root->objectid,
7537                                               bytenr, num_bytes,
7538                                               BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7539                 if (ret)
7540                         return ret;
7541         }
7542         return 0;
7543 }
7544
7545 /*
7546  * Walk up the tree from the bottom, freeing leaves and any interior
7547  * nodes which have had all slots visited. If a node (leaf or
7548  * interior) is freed, the node above it will have it's slot
7549  * interior) is freed, the node above it will have its slot
7550  *
7551  * At the end of this function, we should have a path which has all
7552  * slots incremented to the next position for a search. If we need to
7553  * read a new node it will be NULL and the node above it will have the
7554  * correct slot selected for a later read.
7555  *
7556  * If we increment the root nodes slot counter past the number of
7557  * elements, 1 is returned to signal completion of the search.
7558  */
7559 static int adjust_slots_upwards(struct btrfs_root *root,
7560                                 struct btrfs_path *path, int root_level)
7561 {
7562         int level = 0;
7563         int nr, slot;
7564         struct extent_buffer *eb;
7565
7566         if (root_level == 0)
7567                 return 1;
7568
7569         while (level <= root_level) {
7570                 eb = path->nodes[level];
7571                 nr = btrfs_header_nritems(eb);
7572                 path->slots[level]++;
7573                 slot = path->slots[level];
7574                 if (slot >= nr || level == 0) {
7575                         /*
7576                          * Don't free the root; we will detect this
7577                          * condition after our loop and return a
7578                          * positive value for caller to stop walking the tree.
7579                          */
7580                         if (level != root_level) {
7581                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7582                                 path->locks[level] = 0;
7583
7584                                 free_extent_buffer(eb);
7585                                 path->nodes[level] = NULL;
7586                                 path->slots[level] = 0;
7587                         }
7588                 } else {
7589                         /*
7590                          * We have a valid slot to walk back down
7591                          * from. Stop here so caller can process these
7592                          * new nodes.
7593                          */
7594                         break;
7595                 }
7596
7597                 level++;
7598         }
7599
7600         eb = path->nodes[root_level];
7601         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7602                 return 1;
7603
7604         return 0;
7605 }
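
     /*
      * Worked example (illustrative): take root_level = 2, path slots
      * {0, 3, 1}, with 4 pointers in the level 1 node.  Level 0 is
      * always released; slot 1 then becomes 4 (== nritems), so the
      * level 1 node is released too; slot 2 becomes 2 and the loop
      * stops, leaving the caller to read the new child at
      * path->nodes[2], slot 2, and walk back down.
      */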
7606
7607 /*
7608  * root_eb is the subtree root and is locked before this function is called.
7609  */
7610 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7611                                   struct btrfs_root *root,
7612                                   struct extent_buffer *root_eb,
7613                                   u64 root_gen,
7614                                   int root_level)
7615 {
7616         int ret = 0;
7617         int level;
7618         struct extent_buffer *eb = root_eb;
7619         struct btrfs_path *path = NULL;
7620
7621         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7622         BUG_ON(root_eb == NULL);
7623
7624         if (!root->fs_info->quota_enabled)
7625                 return 0;
7626
7627         if (!extent_buffer_uptodate(root_eb)) {
7628                 ret = btrfs_read_buffer(root_eb, root_gen);
7629                 if (ret)
7630                         goto out;
7631         }
7632
7633         if (root_level == 0) {
7634                 ret = account_leaf_items(trans, root, root_eb);
7635                 goto out;
7636         }
7637
7638         path = btrfs_alloc_path();
7639         if (!path)
7640                 return -ENOMEM;
7641
7642         /*
7643          * Walk down the tree.  Missing extent blocks are filled in as
7644          * we go. Metadata is accounted every time we read a new
7645          * extent block.
7646          *
7647          * When we reach a leaf, we account for file extent items in it,
7648          * walk back up the tree (adjusting slot pointers as we go)
7649          * and restart the search process.
7650          */
7651         extent_buffer_get(root_eb); /* For path */
7652         path->nodes[root_level] = root_eb;
7653         path->slots[root_level] = 0;
7654         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7655 walk_down:
7656         level = root_level;
7657         while (level >= 0) {
7658                 if (path->nodes[level] == NULL) {
7659                         int parent_slot;
7660                         u64 child_gen;
7661                         u64 child_bytenr;
7662
7663                         /* We need to get child blockptr/gen from
7664                          * parent before we can read it. */
7665                         eb = path->nodes[level + 1];
7666                         parent_slot = path->slots[level + 1];
7667                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7668                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7669
7670                         eb = read_tree_block(root, child_bytenr, child_gen);
7671                         if (!eb || !extent_buffer_uptodate(eb)) {
7672                                 free_extent_buffer(eb);
7673                                 ret = -EIO;
7674                                 goto out;
7675                         }
7676                         path->nodes[level] = eb;
7677                         path->slots[level] = 0;
7678
7679                         btrfs_tree_read_lock(eb);
7680                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7681                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7682
7683                         ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7684                                                 root->objectid,
7685                                                 child_bytenr,
7686                                                 root->nodesize,
7687                                                 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7688                                                 0);
7689                         if (ret)
7690                                 goto out;
7691
7692                 }
7693
7694                 if (level == 0) {
7695                         ret = account_leaf_items(trans, root, path->nodes[level]);
7696                         if (ret)
7697                                 goto out;
7698
7699                         /* Nonzero return here means we completed our search */
7700                         ret = adjust_slots_upwards(root, path, root_level);
7701                         if (ret)
7702                                 break;
7703
7704                         /* Restart search with new slots */
7705                         goto walk_down;
7706                 }
7707
7708                 level--;
7709         }
7710
7711         ret = 0;
7712 out:
7713         btrfs_free_path(path);
7714
7715         return ret;
7716 }
7717
7718 /*
7719  * helper to process tree block while walking down the tree.
7720  *
7721  * when wc->stage == UPDATE_BACKREF, this function updates
7722  * back refs for pointers in the block.
7723  *
7724  * NOTE: return value 1 means we should stop walking down.
7725  */
7726 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7727                                    struct btrfs_root *root,
7728                                    struct btrfs_path *path,
7729                                    struct walk_control *wc, int lookup_info)
7730 {
7731         int level = wc->level;
7732         struct extent_buffer *eb = path->nodes[level];
7733         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7734         int ret;
7735
7736         if (wc->stage == UPDATE_BACKREF &&
7737             btrfs_header_owner(eb) != root->root_key.objectid)
7738                 return 1;
7739
7740         /*
7741          * when the reference count of a tree block is 1, it won't increase
7742          * again. once the full backref flag is set, we never clear it.
7743          */
7744         if (lookup_info &&
7745             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7746              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7747                 BUG_ON(!path->locks[level]);
7748                 ret = btrfs_lookup_extent_info(trans, root,
7749                                                eb->start, level, 1,
7750                                                &wc->refs[level],
7751                                                &wc->flags[level]);
7752                 BUG_ON(ret == -ENOMEM);
7753                 if (ret)
7754                         return ret;
7755                 BUG_ON(wc->refs[level] == 0);
7756         }
7757
7758         if (wc->stage == DROP_REFERENCE) {
7759                 if (wc->refs[level] > 1)
7760                         return 1;
7761
7762                 if (path->locks[level] && !wc->keep_locks) {
7763                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7764                         path->locks[level] = 0;
7765                 }
7766                 return 0;
7767         }
7768
7769         /* wc->stage == UPDATE_BACKREF */
7770         if (!(wc->flags[level] & flag)) {
7771                 BUG_ON(!path->locks[level]);
7772                 ret = btrfs_inc_ref(trans, root, eb, 1);
7773                 BUG_ON(ret); /* -ENOMEM */
7774                 ret = btrfs_dec_ref(trans, root, eb, 0);
7775                 BUG_ON(ret); /* -ENOMEM */
7776                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7777                                                   eb->len, flag,
7778                                                   btrfs_header_level(eb), 0);
7779                 BUG_ON(ret); /* -ENOMEM */
7780                 wc->flags[level] |= flag;
7781         }
7782
7783         /*
7784          * the block is shared by multiple trees, so it's not good to
7785          * keep the tree lock
7786          */
7787         if (path->locks[level] && level > 0) {
7788                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7789                 path->locks[level] = 0;
7790         }
7791         return 0;
7792 }
7793
7794 /*
7795  * helper to process tree block pointer.
7796  *
7797  * when wc->stage == DROP_REFERENCE, this function checks
7798  * the reference count of the block pointed to. if the block
7799  * is shared and we need to update back refs for the subtree
7800  * rooted at the block, this function changes wc->stage to
7801  * UPDATE_BACKREF. if the block is shared and there is no
7802  * need to update back refs, this function drops the reference
7803  * to the block.
7804  *
7805  * NOTE: return value 1 means we should stop walking down.
7806  */
7807 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7808                                  struct btrfs_root *root,
7809                                  struct btrfs_path *path,
7810                                  struct walk_control *wc, int *lookup_info)
7811 {
7812         u64 bytenr;
7813         u64 generation;
7814         u64 parent;
7815         u32 blocksize;
7816         struct btrfs_key key;
7817         struct extent_buffer *next;
7818         int level = wc->level;
7819         int reada = 0;
7820         int ret = 0;
7821         bool need_account = false;
7822
7823         generation = btrfs_node_ptr_generation(path->nodes[level],
7824                                                path->slots[level]);
7825         /*
7826          * if the lower level block was created before the snapshot
7827          * was created, we know there is no need to update back refs
7828          * for the subtree
7829          */
7830         if (wc->stage == UPDATE_BACKREF &&
7831             generation <= root->root_key.offset) {
7832                 *lookup_info = 1;
7833                 return 1;
7834         }
7835
7836         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7837         blocksize = root->nodesize;
7838
7839         next = btrfs_find_tree_block(root->fs_info, bytenr);
7840         if (!next) {
7841                 next = btrfs_find_create_tree_block(root, bytenr);
7842                 if (!next)
7843                         return -ENOMEM;
7844                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7845                                                level - 1);
7846                 reada = 1;
7847         }
7848         btrfs_tree_lock(next);
7849         btrfs_set_lock_blocking(next);
7850
7851         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7852                                        &wc->refs[level - 1],
7853                                        &wc->flags[level - 1]);
7854         if (ret < 0) {
7855                 btrfs_tree_unlock(next);
7856                 return ret;
7857         }
7858
7859         if (unlikely(wc->refs[level - 1] == 0)) {
7860                 btrfs_err(root->fs_info, "Missing references.");
7861                 BUG();
7862         }
7863         *lookup_info = 0;
7864
7865         if (wc->stage == DROP_REFERENCE) {
7866                 if (wc->refs[level - 1] > 1) {
7867                         need_account = true;
7868                         if (level == 1 &&
7869                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7870                                 goto skip;
7871
7872                         if (!wc->update_ref ||
7873                             generation <= root->root_key.offset)
7874                                 goto skip;
7875
7876                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7877                                               path->slots[level]);
7878                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7879                         if (ret < 0)
7880                                 goto skip;
7881
7882                         wc->stage = UPDATE_BACKREF;
7883                         wc->shared_level = level - 1;
7884                 }
7885         } else {
7886                 if (level == 1 &&
7887                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7888                         goto skip;
7889         }
7890
7891         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7892                 btrfs_tree_unlock(next);
7893                 free_extent_buffer(next);
7894                 next = NULL;
7895                 *lookup_info = 1;
7896         }
7897
7898         if (!next) {
7899                 if (reada && level == 1)
7900                         reada_walk_down(trans, root, wc, path);
7901                 next = read_tree_block(root, bytenr, generation);
7902                 if (!next || !extent_buffer_uptodate(next)) {
7903                         free_extent_buffer(next);
7904                         return -EIO;
7905                 }
7906                 btrfs_tree_lock(next);
7907                 btrfs_set_lock_blocking(next);
7908         }
7909
7910         level--;
7911         BUG_ON(level != btrfs_header_level(next));
7912         path->nodes[level] = next;
7913         path->slots[level] = 0;
7914         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7915         wc->level = level;
7916         if (wc->level == 1)
7917                 wc->reada_slot = 0;
7918         return 0;
7919 skip:
7920         wc->refs[level - 1] = 0;
7921         wc->flags[level - 1] = 0;
7922         if (wc->stage == DROP_REFERENCE) {
7923                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7924                         parent = path->nodes[level]->start;
7925                 } else {
7926                         BUG_ON(root->root_key.objectid !=
7927                                btrfs_header_owner(path->nodes[level]));
7928                         parent = 0;
7929                 }
7930
7931                 if (need_account) {
7932                         ret = account_shared_subtree(trans, root, next,
7933                                                      generation, level - 1);
7934                         if (ret) {
7935                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7936                                         "%d accounting shared subtree. Quota "
7937                                         "is out of sync, rescan required.\n",
7938                                         root->fs_info->sb->s_id, ret);
7939                         }
7940                 }
7941                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7942                                 root->root_key.objectid, level - 1, 0, 0);
7943                 BUG_ON(ret); /* -ENOMEM */
7944         }
7945         btrfs_tree_unlock(next);
7946         free_extent_buffer(next);
7947         *lookup_info = 1;
7948         return 1;
7949 }
7950
7951 /*
7952  * helper to process a tree block while walking up the tree.
7953  *
7954  * when wc->stage == DROP_REFERENCE, this function drops the
7955  * reference count on the block.
7956  *
7957  * when wc->stage == UPDATE_BACKREF, this function changes
7958  * wc->stage back to DROP_REFERENCE if it was switched to
7959  * UPDATE_BACKREF earlier while walking down to this block.
7960  *
7961  * NOTE: return value 1 means we should stop walking up.
7962  */
7963 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7964                                  struct btrfs_root *root,
7965                                  struct btrfs_path *path,
7966                                  struct walk_control *wc)
7967 {
7968         int ret;
7969         int level = wc->level;
7970         struct extent_buffer *eb = path->nodes[level];
7971         u64 parent = 0;
7972
7973         if (wc->stage == UPDATE_BACKREF) {
7974                 BUG_ON(wc->shared_level < level);
7975                 if (level < wc->shared_level)
7976                         goto out;
7977
7978                 ret = find_next_key(path, level + 1, &wc->update_progress);
7979                 if (ret > 0)
7980                         wc->update_ref = 0;
7981
7982                 wc->stage = DROP_REFERENCE;
7983                 wc->shared_level = -1;
7984                 path->slots[level] = 0;
7985
7986                 /*
7987                  * check the reference count again if the block isn't
7988                  * locked. we should start walking down the tree again
7989                  * if the reference count is one.
7990                  */
7991                 if (!path->locks[level]) {
7992                         BUG_ON(level == 0);
7993                         btrfs_tree_lock(eb);
7994                         btrfs_set_lock_blocking(eb);
7995                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7996
7997                         ret = btrfs_lookup_extent_info(trans, root,
7998                                                        eb->start, level, 1,
7999                                                        &wc->refs[level],
8000                                                        &wc->flags[level]);
8001                         if (ret < 0) {
8002                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8003                                 path->locks[level] = 0;
8004                                 return ret;
8005                         }
8006                         BUG_ON(wc->refs[level] == 0);
8007                         if (wc->refs[level] == 1) {
8008                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8009                                 path->locks[level] = 0;
8010                                 return 1;
8011                         }
8012                 }
8013         }
8014
8015         /* wc->stage == DROP_REFERENCE */
8016         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8017
8018         if (wc->refs[level] == 1) {
8019                 if (level == 0) {
8020                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8021                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8022                         else
8023                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8024                         BUG_ON(ret); /* -ENOMEM */
8025                         ret = account_leaf_items(trans, root, eb);
8026                         if (ret) {
8027                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8028                                         "%d accounting leaf items. Quota "
8029                                         "is out of sync, rescan required.\n",
8030                                         root->fs_info->sb->s_id, ret);
8031                         }
8032                 }
8033                 /* make block locked assertion in clean_tree_block happy */
8034                 if (!path->locks[level] &&
8035                     btrfs_header_generation(eb) == trans->transid) {
8036                         btrfs_tree_lock(eb);
8037                         btrfs_set_lock_blocking(eb);
8038                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8039                 }
8040                 clean_tree_block(trans, root->fs_info, eb);
8041         }
8042
8043         if (eb == root->node) {
8044                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8045                         parent = eb->start;
8046                 else
8047                         BUG_ON(root->root_key.objectid !=
8048                                btrfs_header_owner(eb));
8049         } else {
8050                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8051                         parent = path->nodes[level + 1]->start;
8052                 else
8053                         BUG_ON(root->root_key.objectid !=
8054                                btrfs_header_owner(path->nodes[level + 1]));
8055         }
8056
8057         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8058 out:
8059         wc->refs[level] = 0;
8060         wc->flags[level] = 0;
8061         return 0;
8062 }
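
/*
 * A short worked trace of the two-stage walk, assuming update_ref is set:
 * walking down, do_walk_down() meets a shared node written after the
 * snapshot was taken, flips wc->stage to UPDATE_BACKREF and records
 * wc->shared_level.  The subtree below is then walked once more only to
 * update backrefs.  When walk_up_proc() climbs back above wc->shared_level
 * it saves the next key in wc->update_progress, restores DROP_REFERENCE,
 * and the normal freeing walk resumes from there.
 */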
8063
8064 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8065                                    struct btrfs_root *root,
8066                                    struct btrfs_path *path,
8067                                    struct walk_control *wc)
8068 {
8069         int level = wc->level;
8070         int lookup_info = 1;
8071         int ret;
8072
8073         while (level >= 0) {
8074                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8075                 if (ret > 0)
8076                         break;
8077
8078                 if (level == 0)
8079                         break;
8080
8081                 if (path->slots[level] >=
8082                     btrfs_header_nritems(path->nodes[level]))
8083                         break;
8084
8085                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8086                 if (ret > 0) {
8087                         path->slots[level]++;
8088                         continue;
8089                 } else if (ret < 0)
8090                         return ret;
8091                 level = wc->level;
8092         }
8093         return 0;
8094 }
8095
8096 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8097                                  struct btrfs_root *root,
8098                                  struct btrfs_path *path,
8099                                  struct walk_control *wc, int max_level)
8100 {
8101         int level = wc->level;
8102         int ret;
8103
8104         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8105         while (level < max_level && path->nodes[level]) {
8106                 wc->level = level;
8107                 if (path->slots[level] + 1 <
8108                     btrfs_header_nritems(path->nodes[level])) {
8109                         path->slots[level]++;
8110                         return 0;
8111                 } else {
8112                         ret = walk_up_proc(trans, root, path, wc);
8113                         if (ret > 0)
8114                                 return 0;
8115
8116                         if (path->locks[level]) {
8117                                 btrfs_tree_unlock_rw(path->nodes[level],
8118                                                      path->locks[level]);
8119                                 path->locks[level] = 0;
8120                         }
8121                         free_extent_buffer(path->nodes[level]);
8122                         path->nodes[level] = NULL;
8123                         level++;
8124                 }
8125         }
8126         return 1;
8127 }
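
/*
 * walk_down_tree() and walk_up_tree() are driven in a loop: the down pass
 * descends until it reaches a leaf or a block that can be skipped, the up
 * pass frees finished blocks and either steps the path to the next sibling
 * (returns 0) or, once every node up to max_level has been processed,
 * returns 1 to signal that the whole sub-tree is done.
 */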
8128
8129 /*
8130  * drop a subvolume tree.
8131  *
8132  * this function traverses the tree freeing any blocks that are only
8133  * referenced by the tree.
8134  *
8135  * when a shared tree block is found, this function decreases its
8136  * reference count by one. if update_ref is true, this function
8137  * also makes sure backrefs for the shared block and all lower-level
8138  * blocks are properly updated.
8139  *
8140  * If called with for_reloc == 0, may exit early with -EAGAIN
8141  */
8142 int btrfs_drop_snapshot(struct btrfs_root *root,
8143                          struct btrfs_block_rsv *block_rsv, int update_ref,
8144                          int for_reloc)
8145 {
8146         struct btrfs_path *path;
8147         struct btrfs_trans_handle *trans;
8148         struct btrfs_root *tree_root = root->fs_info->tree_root;
8149         struct btrfs_root_item *root_item = &root->root_item;
8150         struct walk_control *wc;
8151         struct btrfs_key key;
8152         int err = 0;
8153         int ret;
8154         int level;
8155         bool root_dropped = false;
8156
8157         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8158
8159         path = btrfs_alloc_path();
8160         if (!path) {
8161                 err = -ENOMEM;
8162                 goto out;
8163         }
8164
8165         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8166         if (!wc) {
8167                 btrfs_free_path(path);
8168                 err = -ENOMEM;
8169                 goto out;
8170         }
8171
8172         trans = btrfs_start_transaction(tree_root, 0);
8173         if (IS_ERR(trans)) {
8174                 err = PTR_ERR(trans);
8175                 goto out_free;
8176         }
8177
8178         if (block_rsv)
8179                 trans->block_rsv = block_rsv;
8180
8181         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8182                 level = btrfs_header_level(root->node);
8183                 path->nodes[level] = btrfs_lock_root_node(root);
8184                 btrfs_set_lock_blocking(path->nodes[level]);
8185                 path->slots[level] = 0;
8186                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8187                 memset(&wc->update_progress, 0,
8188                        sizeof(wc->update_progress));
8189         } else {
8190                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8191                 memcpy(&wc->update_progress, &key,
8192                        sizeof(wc->update_progress));
8193
8194                 level = root_item->drop_level;
8195                 BUG_ON(level == 0);
8196                 path->lowest_level = level;
8197                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8198                 path->lowest_level = 0;
8199                 if (ret < 0) {
8200                         err = ret;
8201                         goto out_end_trans;
8202                 }
8203                 WARN_ON(ret > 0);
8204
8205                 /*
8206                  * unlock our path, this is safe because only this
8207                  * function is allowed to delete this snapshot
8208                  */
8209                 btrfs_unlock_up_safe(path, 0);
8210
8211                 level = btrfs_header_level(root->node);
8212                 while (1) {
8213                         btrfs_tree_lock(path->nodes[level]);
8214                         btrfs_set_lock_blocking(path->nodes[level]);
8215                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8216
8217                         ret = btrfs_lookup_extent_info(trans, root,
8218                                                 path->nodes[level]->start,
8219                                                 level, 1, &wc->refs[level],
8220                                                 &wc->flags[level]);
8221                         if (ret < 0) {
8222                                 err = ret;
8223                                 goto out_end_trans;
8224                         }
8225                         BUG_ON(wc->refs[level] == 0);
8226
8227                         if (level == root_item->drop_level)
8228                                 break;
8229
8230                         btrfs_tree_unlock(path->nodes[level]);
8231                         path->locks[level] = 0;
8232                         WARN_ON(wc->refs[level] != 1);
8233                         level--;
8234                 }
8235         }
8236
8237         wc->level = level;
8238         wc->shared_level = -1;
8239         wc->stage = DROP_REFERENCE;
8240         wc->update_ref = update_ref;
8241         wc->keep_locks = 0;
8242         wc->for_reloc = for_reloc;
8243         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8244
8245         while (1) {
8246
8247                 ret = walk_down_tree(trans, root, path, wc);
8248                 if (ret < 0) {
8249                         err = ret;
8250                         break;
8251                 }
8252
8253                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8254                 if (ret < 0) {
8255                         err = ret;
8256                         break;
8257                 }
8258
8259                 if (ret > 0) {
8260                         BUG_ON(wc->stage != DROP_REFERENCE);
8261                         break;
8262                 }
8263
8264                 if (wc->stage == DROP_REFERENCE) {
8265                         level = wc->level;
8266                         btrfs_node_key(path->nodes[level],
8267                                        &root_item->drop_progress,
8268                                        path->slots[level]);
8269                         root_item->drop_level = level;
8270                 }
8271
8272                 BUG_ON(wc->level == 0);
8273                 if (btrfs_should_end_transaction(trans, tree_root) ||
8274                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8275                         ret = btrfs_update_root(trans, tree_root,
8276                                                 &root->root_key,
8277                                                 root_item);
8278                         if (ret) {
8279                                 btrfs_abort_transaction(trans, tree_root, ret);
8280                                 err = ret;
8281                                 goto out_end_trans;
8282                         }
8283
8284                         /*
8285                          * Qgroup update accounting is run from
8286                          * delayed ref handling. This usually works
8287                          * out because delayed refs are normally the
8288                          * only way qgroup updates are added. However,
8289                          * we may have added updates during our tree
8290                          * walk so run qgroups here to make sure we
8291                          * don't lose any updates.
8292                          */
8293                         ret = btrfs_delayed_qgroup_accounting(trans,
8294                                                               root->fs_info);
8295                         if (ret)
8296                                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8297                                                    "running qgroup updates "
8298                                                    "during snapshot delete. "
8299                                                    "Quota is out of sync, "
8300                                                    "rescan required.\n", ret);
8301
8302                         btrfs_end_transaction_throttle(trans, tree_root);
8303                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8304                                 pr_debug("BTRFS: drop snapshot early exit\n");
8305                                 err = -EAGAIN;
8306                                 goto out_free;
8307                         }
8308
8309                         trans = btrfs_start_transaction(tree_root, 0);
8310                         if (IS_ERR(trans)) {
8311                                 err = PTR_ERR(trans);
8312                                 goto out_free;
8313                         }
8314                         if (block_rsv)
8315                                 trans->block_rsv = block_rsv;
8316                 }
8317         }
8318         btrfs_release_path(path);
8319         if (err)
8320                 goto out_end_trans;
8321
8322         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8323         if (ret) {
8324                 btrfs_abort_transaction(trans, tree_root, ret);
8325                 goto out_end_trans;
8326         }
8327
8328         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8329                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8330                                       NULL, NULL);
8331                 if (ret < 0) {
8332                         btrfs_abort_transaction(trans, tree_root, ret);
8333                         err = ret;
8334                         goto out_end_trans;
8335                 } else if (ret > 0) {
8336                         /* if we fail to delete the orphan item this time
8337                          * around, it'll get picked up the next time.
8338                          *
8339                          * The most common failure here is just -ENOENT.
8340                          */
8341                         btrfs_del_orphan_item(trans, tree_root,
8342                                               root->root_key.objectid);
8343                 }
8344         }
8345
8346         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8347                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8348         } else {
8349                 free_extent_buffer(root->node);
8350                 free_extent_buffer(root->commit_root);
8351                 btrfs_put_fs_root(root);
8352         }
8353         root_dropped = true;
8354 out_end_trans:
8355         ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8356         if (ret)
8357                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8358                                    "running qgroup updates "
8359                                    "during snapshot delete. "
8360                                    "Quota is out of sync, "
8361                                    "rescan required.\n", ret);
8362
8363         btrfs_end_transaction_throttle(trans, tree_root);
8364 out_free:
8365         kfree(wc);
8366         btrfs_free_path(path);
8367 out:
8368         /*
8369          * If we had to stop dropping the snapshot for whatever reason we
8370          * need to add it back to the dead root list so that we keep
8371          * trying to do the work later.  This also cleans up roots if we
8372          * don't have them in the radix tree (like when we recover after a
8373          * power fail or unmount) so we don't leak memory.
8374          */
8375         if (!for_reloc && root_dropped == false)
8376                 btrfs_add_dead_root(root);
8377         if (err && err != -EAGAIN)
8378                 btrfs_std_error(root->fs_info, err);
8379         return err;
8380 }
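
/*
 * A minimal caller sketch (not part of this file) showing how the
 * -EAGAIN contract above is meant to be consumed; the helper name
 * try_drop_dead_root() is hypothetical.
 */
static int try_drop_dead_root(struct btrfs_root *root)
{
        /* no extra block reservation, don't force backref updates */
        int ret = btrfs_drop_snapshot(root, NULL, 0, 0);

        /*
         * -EAGAIN means the cleaner was asked to sleep: drop_progress was
         * saved and the root went back on the dead root list, so a later
         * pass simply calls btrfs_drop_snapshot() again to resume.
         */
        if (ret == -EAGAIN)
                return 0;
        return ret;
}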
8381
8382 /*
8383  * drop the subtree rooted at tree block 'node'.
8384  *
8385  * NOTE: this function will unlock and release tree block 'node'.
8386  * It is only used by the relocation code.
8387  */
8388 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8389                         struct btrfs_root *root,
8390                         struct extent_buffer *node,
8391                         struct extent_buffer *parent)
8392 {
8393         struct btrfs_path *path;
8394         struct walk_control *wc;
8395         int level;
8396         int parent_level;
8397         int ret = 0;
8398         int wret;
8399
8400         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8401
8402         path = btrfs_alloc_path();
8403         if (!path)
8404                 return -ENOMEM;
8405
8406         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8407         if (!wc) {
8408                 btrfs_free_path(path);
8409                 return -ENOMEM;
8410         }
8411
8412         btrfs_assert_tree_locked(parent);
8413         parent_level = btrfs_header_level(parent);
8414         extent_buffer_get(parent);
8415         path->nodes[parent_level] = parent;
8416         path->slots[parent_level] = btrfs_header_nritems(parent);
8417
8418         btrfs_assert_tree_locked(node);
8419         level = btrfs_header_level(node);
8420         path->nodes[level] = node;
8421         path->slots[level] = 0;
8422         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8423
8424         wc->refs[parent_level] = 1;
8425         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8426         wc->level = level;
8427         wc->shared_level = -1;
8428         wc->stage = DROP_REFERENCE;
8429         wc->update_ref = 0;
8430         wc->keep_locks = 1;
8431         wc->for_reloc = 1;
8432         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8433
8434         while (1) {
8435                 wret = walk_down_tree(trans, root, path, wc);
8436                 if (wret < 0) {
8437                         ret = wret;
8438                         break;
8439                 }
8440
8441                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8442                 if (wret < 0)
8443                         ret = wret;
8444                 if (wret != 0)
8445                         break;
8446         }
8447
8448         kfree(wc);
8449         btrfs_free_path(path);
8450         return ret;
8451 }
8452
8453 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8454 {
8455         u64 num_devices;
8456         u64 stripped;
8457
8458         /*
8459          * if a restripe for this chunk type is on, pick the target
8460          * profile and return; otherwise do the usual balance
8461          */
8462         stripped = get_restripe_target(root->fs_info, flags);
8463         if (stripped)
8464                 return extended_to_chunk(stripped);
8465
8466         num_devices = root->fs_info->fs_devices->rw_devices;
8467
8468         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8469                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8470                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8471
8472         if (num_devices == 1) {
8473                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8474                 stripped = flags & ~stripped;
8475
8476                 /* turn raid0 into single device chunks */
8477                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8478                         return stripped;
8479
8480                 /* turn mirroring into duplication */
8481                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8482                              BTRFS_BLOCK_GROUP_RAID10))
8483                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8484         } else {
8485                 /* they already had raid on here, just return */
8486                 if (flags & stripped)
8487                         return flags;
8488
8489                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8490                 stripped = flags & ~stripped;
8491
8492                 /* convert duplication to raid1 */
8493                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8494                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8495
8496                 /* this is drive concat, leave it alone */
8497         }
8498
8499         return flags;
8500 }
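
/*
 * Examples of what update_block_group_flags() computes: on a one-device
 * filesystem RAID1 and RAID10 degrade to DUP and RAID0 degrades to single;
 * with several writable devices DUP is promoted to RAID1, an already
 * striped/mirrored profile is returned unchanged, and plain single
 * ("drive concat") is left alone.
 */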
8501
8502 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8503 {
8504         struct btrfs_space_info *sinfo = cache->space_info;
8505         u64 num_bytes;
8506         u64 min_allocable_bytes;
8507         int ret = -ENOSPC;
8508
8509
8510         /*
8511          * We need some metadata space and system metadata space left
8512          * over for allocating chunks in some corner cases, so unless
8513          * the caller forces it, keep a little slack before going readonly.
8514          */
8515         if ((sinfo->flags &
8516              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8517             !force)
8518                 min_allocable_bytes = 1 * 1024 * 1024;
8519         else
8520                 min_allocable_bytes = 0;
8521
8522         spin_lock(&sinfo->lock);
8523         spin_lock(&cache->lock);
8524
8525         if (cache->ro) {
8526                 ret = 0;
8527                 goto out;
8528         }
8529
8530         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8531                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8532
8533         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8534             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8535             min_allocable_bytes <= sinfo->total_bytes) {
8536                 sinfo->bytes_readonly += num_bytes;
8537                 cache->ro = 1;
8538                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8539                 ret = 0;
8540         }
8541 out:
8542         spin_unlock(&cache->lock);
8543         spin_unlock(&sinfo->lock);
8544         return ret;
8545 }
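
/*
 * In other words, set_block_group_ro() only succeeds when moving this
 * group's unused bytes into bytes_readonly still fits in the space info:
 *
 *   used + reserved + pinned + may_use + readonly +
 *   (unused bytes of this group) + min_allocable_bytes <= total_bytes
 */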
8546
8547 int btrfs_set_block_group_ro(struct btrfs_root *root,
8548                              struct btrfs_block_group_cache *cache)
8549
8550 {
8551         struct btrfs_trans_handle *trans;
8552         u64 alloc_flags;
8553         int ret;
8554
8555         BUG_ON(cache->ro);
8556
8557         trans = btrfs_join_transaction(root);
8558         if (IS_ERR(trans))
8559                 return PTR_ERR(trans);
8560
8561         ret = set_block_group_ro(cache, 0);
8562         if (!ret)
8563                 goto out;
8564         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8565         ret = do_chunk_alloc(trans, root, alloc_flags,
8566                              CHUNK_ALLOC_FORCE);
8567         if (ret < 0)
8568                 goto out;
8569         ret = set_block_group_ro(cache, 0);
8570 out:
8571         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8572                 alloc_flags = update_block_group_flags(root, cache->flags);
8573                 check_system_chunk(trans, root, alloc_flags);
8574         }
8575
8576         btrfs_end_transaction(trans, root);
8577         return ret;
8578 }
8579
8580 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8581                             struct btrfs_root *root, u64 type)
8582 {
8583         u64 alloc_flags = get_alloc_profile(root, type);
8584         return do_chunk_alloc(trans, root, alloc_flags,
8585                               CHUNK_ALLOC_FORCE);
8586 }
8587
8588 /*
8589  * helper to account the unused space of all the readonly block groups in
8590  * the space_info. takes mirrors into account.
8591  */
8592 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8593 {
8594         struct btrfs_block_group_cache *block_group;
8595         u64 free_bytes = 0;
8596         int factor;
8597
8598         /* It's df, we don't care if it's racy */
8599         if (list_empty(&sinfo->ro_bgs))
8600                 return 0;
8601
8602         spin_lock(&sinfo->lock);
8603         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8604                 spin_lock(&block_group->lock);
8605
8606                 if (!block_group->ro) {
8607                         spin_unlock(&block_group->lock);
8608                         continue;
8609                 }
8610
8611                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8612                                           BTRFS_BLOCK_GROUP_RAID10 |
8613                                           BTRFS_BLOCK_GROUP_DUP))
8614                         factor = 2;
8615                 else
8616                         factor = 1;
8617
8618                 free_bytes += (block_group->key.offset -
8619                                btrfs_block_group_used(&block_group->item)) *
8620                                factor;
8621
8622                 spin_unlock(&block_group->lock);
8623         }
8624         spin_unlock(&sinfo->lock);
8625
8626         return free_bytes;
8627 }
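
/*
 * Worked example for the factor above: a 1GiB RAID1 block group with
 * 256MiB used contributes (1GiB - 256MiB) * 2 = 1.5GiB, since the unused
 * space is mirrored and therefore occupies twice as many raw bytes on
 * disk.
 */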
8628
8629 void btrfs_set_block_group_rw(struct btrfs_root *root,
8630                               struct btrfs_block_group_cache *cache)
8631 {
8632         struct btrfs_space_info *sinfo = cache->space_info;
8633         u64 num_bytes;
8634
8635         BUG_ON(!cache->ro);
8636
8637         spin_lock(&sinfo->lock);
8638         spin_lock(&cache->lock);
8639         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8640                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8641         sinfo->bytes_readonly -= num_bytes;
8642         cache->ro = 0;
8643         list_del_init(&cache->ro_list);
8644         spin_unlock(&cache->lock);
8645         spin_unlock(&sinfo->lock);
8646 }
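
/*
 * This is the inverse of set_block_group_ro(): the unused bytes that were
 * moved into bytes_readonly are handed back, so the space info accounting
 * stays balanced across a failed or finished relocation.
 */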
8647
8648 /*
8649  * checks to see if it's even possible to relocate this block group.
8650  *
8651  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8652  * it's ok to go ahead and try.
8653  */
8654 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8655 {
8656         struct btrfs_block_group_cache *block_group;
8657         struct btrfs_space_info *space_info;
8658         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8659         struct btrfs_device *device;
8660         struct btrfs_trans_handle *trans;
8661         u64 min_free;
8662         u64 dev_min = 1;
8663         u64 dev_nr = 0;
8664         u64 target;
8665         int index;
8666         int full = 0;
8667         int ret = 0;
8668
8669         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8670
8671         /* odd, couldn't find the block group, leave it alone */
8672         if (!block_group)
8673                 return -1;
8674
8675         min_free = btrfs_block_group_used(&block_group->item);
8676
8677         /* no bytes used, we're good */
8678         if (!min_free)
8679                 goto out;
8680
8681         space_info = block_group->space_info;
8682         spin_lock(&space_info->lock);
8683
8684         full = space_info->full;
8685
8686         /*
8687          * if this is the last block group we have in this space, we can't
8688          * relocate it unless we're able to allocate a new chunk below.
8689          *
8690          * Otherwise, we need to make sure we have room in the space to handle
8691          * all of the extents from this block group.  If we can, we're good.
8692          */
8693         if ((space_info->total_bytes != block_group->key.offset) &&
8694             (space_info->bytes_used + space_info->bytes_reserved +
8695              space_info->bytes_pinned + space_info->bytes_readonly +
8696              min_free < space_info->total_bytes)) {
8697                 spin_unlock(&space_info->lock);
8698                 goto out;
8699         }
8700         spin_unlock(&space_info->lock);
8701
8702         /*
8703          * ok we don't have enough space, but maybe we have free space on our
8704          * devices to allocate new chunks for relocation, so loop through our
8705          * alloc devices and guess if we have enough space.  if this block
8706          * group is going to be restriped, run checks against the target
8707          * profile instead of the current one.
8708          */
8709         ret = -1;
8710
8711         /*
8712          * index:
8713          *      0: raid10
8714          *      1: raid1
8715          *      2: dup
8716          *      3: raid0
8717          *      4: single
8718          */
8719         target = get_restripe_target(root->fs_info, block_group->flags);
8720         if (target) {
8721                 index = __get_raid_index(extended_to_chunk(target));
8722         } else {
8723                 /*
8724                  * this is just a balance, so if we were marked as full
8725                  * we know there is no space for a new chunk
8726                  */
8727                 if (full)
8728                         goto out;
8729
8730                 index = get_block_group_index(block_group);
8731         }
8732
8733         if (index == BTRFS_RAID_RAID10) {
8734                 dev_min = 4;
8735                 /* Divide by 2 */
8736                 min_free >>= 1;
8737         } else if (index == BTRFS_RAID_RAID1) {
8738                 dev_min = 2;
8739         } else if (index == BTRFS_RAID_DUP) {
8740                 /* Multiply by 2 */
8741                 min_free <<= 1;
8742         } else if (index == BTRFS_RAID_RAID0) {
8743                 dev_min = fs_devices->rw_devices;
8744                 min_free = div64_u64(min_free, dev_min);
8745         }
8746
8747         /* We need to do this so that we can look at pending chunks */
8748         trans = btrfs_join_transaction(root);
8749         if (IS_ERR(trans)) {
8750                 ret = PTR_ERR(trans);
8751                 goto out;
8752         }
8753
8754         mutex_lock(&root->fs_info->chunk_mutex);
8755         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8756                 u64 dev_offset;
8757
8758                 /*
8759                  * check to make sure we can actually find a chunk with enough
8760                  * space to fit our block group in.
8761                  */
8762                 if (device->total_bytes > device->bytes_used + min_free &&
8763                     !device->is_tgtdev_for_dev_replace) {
8764                         ret = find_free_dev_extent(trans, device, min_free,
8765                                                    &dev_offset, NULL);
8766                         if (!ret)
8767                                 dev_nr++;
8768
8769                         if (dev_nr >= dev_min)
8770                                 break;
8771
8772                         ret = -1;
8773                 }
8774         }
8775         mutex_unlock(&root->fs_info->chunk_mutex);
8776         btrfs_end_transaction(trans, root);
8777 out:
8778         btrfs_put_block_group(block_group);
8779         return ret;
8780 }
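
/*
 * Summary of the per-profile scaling used above when guessing whether new
 * chunks could hold the relocated data: RAID10 needs min_free/2 on each of
 * at least 4 devices, RAID1 needs min_free on at least 2 devices, DUP
 * needs 2 * min_free on a single device, and RAID0 splits min_free evenly
 * across all writable devices.
 */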
8781
8782 static int find_first_block_group(struct btrfs_root *root,
8783                 struct btrfs_path *path, struct btrfs_key *key)
8784 {
8785         int ret = 0;
8786         struct btrfs_key found_key;
8787         struct extent_buffer *leaf;
8788         int slot;
8789
8790         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8791         if (ret < 0)
8792                 goto out;
8793
8794         while (1) {
8795                 slot = path->slots[0];
8796                 leaf = path->nodes[0];
8797                 if (slot >= btrfs_header_nritems(leaf)) {
8798                         ret = btrfs_next_leaf(root, path);
8799                         if (ret == 0)
8800                                 continue;
8801                         if (ret < 0)
8802                                 goto out;
8803                         break;
8804                 }
8805                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8806
8807                 if (found_key.objectid >= key->objectid &&
8808                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8809                         ret = 0;
8810                         goto out;
8811                 }
8812                 path->slots[0]++;
8813         }
8814 out:
8815         return ret;
8816 }
8817
8818 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8819 {
8820         struct btrfs_block_group_cache *block_group;
8821         u64 last = 0;
8822
8823         while (1) {
8824                 struct inode *inode;
8825
8826                 block_group = btrfs_lookup_first_block_group(info, last);
8827                 while (block_group) {
8828                         spin_lock(&block_group->lock);
8829                         if (block_group->iref)
8830                                 break;
8831                         spin_unlock(&block_group->lock);
8832                         block_group = next_block_group(info->tree_root,
8833                                                        block_group);
8834                 }
8835                 if (!block_group) {
8836                         if (last == 0)
8837                                 break;
8838                         last = 0;
8839                         continue;
8840                 }
8841
8842                 inode = block_group->inode;
8843                 block_group->iref = 0;
8844                 block_group->inode = NULL;
8845                 spin_unlock(&block_group->lock);
8846                 iput(inode);
8847                 last = block_group->key.objectid + block_group->key.offset;
8848                 btrfs_put_block_group(block_group);
8849         }
8850 }
8851
8852 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8853 {
8854         struct btrfs_block_group_cache *block_group;
8855         struct btrfs_space_info *space_info;
8856         struct btrfs_caching_control *caching_ctl;
8857         struct rb_node *n;
8858
8859         down_write(&info->commit_root_sem);
8860         while (!list_empty(&info->caching_block_groups)) {
8861                 caching_ctl = list_entry(info->caching_block_groups.next,
8862                                          struct btrfs_caching_control, list);
8863                 list_del(&caching_ctl->list);
8864                 put_caching_control(caching_ctl);
8865         }
8866         up_write(&info->commit_root_sem);
8867
8868         spin_lock(&info->unused_bgs_lock);
8869         while (!list_empty(&info->unused_bgs)) {
8870                 block_group = list_first_entry(&info->unused_bgs,
8871                                                struct btrfs_block_group_cache,
8872                                                bg_list);
8873                 list_del_init(&block_group->bg_list);
8874                 btrfs_put_block_group(block_group);
8875         }
8876         spin_unlock(&info->unused_bgs_lock);
8877
8878         spin_lock(&info->block_group_cache_lock);
8879         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8880                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8881                                        cache_node);
8882                 rb_erase(&block_group->cache_node,
8883                          &info->block_group_cache_tree);
8884                 RB_CLEAR_NODE(&block_group->cache_node);
8885                 spin_unlock(&info->block_group_cache_lock);
8886
8887                 down_write(&block_group->space_info->groups_sem);
8888                 list_del(&block_group->list);
8889                 up_write(&block_group->space_info->groups_sem);
8890
8891                 if (block_group->cached == BTRFS_CACHE_STARTED)
8892                         wait_block_group_cache_done(block_group);
8893
8894                 /*
8895                  * We haven't cached this block group, which means we could
8896                  * possibly have excluded extents on this block group.
8897                  */
8898                 if (block_group->cached == BTRFS_CACHE_NO ||
8899                     block_group->cached == BTRFS_CACHE_ERROR)
8900                         free_excluded_extents(info->extent_root, block_group);
8901
8902                 btrfs_remove_free_space_cache(block_group);
8903                 btrfs_put_block_group(block_group);
8904
8905                 spin_lock(&info->block_group_cache_lock);
8906         }
8907         spin_unlock(&info->block_group_cache_lock);
8908
8909         /* now that all the block groups are freed, go through and
8910          * free all the space_info structs.  This is only called during
8911          * the final stages of unmount, and so we know nobody is
8912          * using them.  We call synchronize_rcu() once before we start,
8913          * just to be on the safe side.
8914          */
8915         synchronize_rcu();
8916
8917         release_global_block_rsv(info);
8918
8919         while (!list_empty(&info->space_info)) {
8920                 int i;
8921
8922                 space_info = list_entry(info->space_info.next,
8923                                         struct btrfs_space_info,
8924                                         list);
8925                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8926                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8927                             space_info->bytes_reserved > 0 ||
8928                             space_info->bytes_may_use > 0)) {
8929                                 dump_space_info(space_info, 0, 0);
8930                         }
8931                 }
8932                 list_del(&space_info->list);
8933                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8934                         struct kobject *kobj;
8935                         kobj = space_info->block_group_kobjs[i];
8936                         space_info->block_group_kobjs[i] = NULL;
8937                         if (kobj) {
8938                                 kobject_del(kobj);
8939                                 kobject_put(kobj);
8940                         }
8941                 }
8942                 kobject_del(&space_info->kobj);
8943                 kobject_put(&space_info->kobj);
8944         }
8945         return 0;
8946 }
8947
8948 static void __link_block_group(struct btrfs_space_info *space_info,
8949                                struct btrfs_block_group_cache *cache)
8950 {
8951         int index = get_block_group_index(cache);
8952         bool first = false;
8953
8954         down_write(&space_info->groups_sem);
8955         if (list_empty(&space_info->block_groups[index]))
8956                 first = true;
8957         list_add_tail(&cache->list, &space_info->block_groups[index]);
8958         up_write(&space_info->groups_sem);
8959
8960         if (first) {
8961                 struct raid_kobject *rkobj;
8962                 int ret;
8963
8964                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8965                 if (!rkobj)
8966                         goto out_err;
8967                 rkobj->raid_type = index;
8968                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8969                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8970                                   "%s", get_raid_name(index));
8971                 if (ret) {
8972                         kobject_put(&rkobj->kobj);
8973                         goto out_err;
8974                 }
8975                 space_info->block_group_kobjs[index] = &rkobj->kobj;
8976         }
8977
8978         return;
8979 out_err:
8980         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8981 }
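
/*
 * The kobject added above is what exposes per-RAID-level allocation
 * statistics in sysfs; with the layout defined in sysfs.c this ends up
 * roughly under /sys/fs/btrfs/<fsid>/allocation/<type>/<raid name>
 * (path given for orientation only).
 */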
8982
8983 static struct btrfs_block_group_cache *
8984 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8985 {
8986         struct btrfs_block_group_cache *cache;
8987
8988         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8989         if (!cache)
8990                 return NULL;
8991
8992         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8993                                         GFP_NOFS);
8994         if (!cache->free_space_ctl) {
8995                 kfree(cache);
8996                 return NULL;
8997         }
8998
8999         cache->key.objectid = start;
9000         cache->key.offset = size;
9001         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9002
9003         cache->sectorsize = root->sectorsize;
9004         cache->fs_info = root->fs_info;
9005         cache->full_stripe_len = btrfs_full_stripe_len(root,
9006                                                &root->fs_info->mapping_tree,
9007                                                start);
9008         atomic_set(&cache->count, 1);
9009         spin_lock_init(&cache->lock);
9010         init_rwsem(&cache->data_rwsem);
9011         INIT_LIST_HEAD(&cache->list);
9012         INIT_LIST_HEAD(&cache->cluster_list);
9013         INIT_LIST_HEAD(&cache->bg_list);
9014         INIT_LIST_HEAD(&cache->ro_list);
9015         INIT_LIST_HEAD(&cache->dirty_list);
9016         btrfs_init_free_space_ctl(cache);
9017         atomic_set(&cache->trimming, 0);
9018
9019         return cache;
9020 }
9021
9022 int btrfs_read_block_groups(struct btrfs_root *root)
9023 {
9024         struct btrfs_path *path;
9025         int ret;
9026         struct btrfs_block_group_cache *cache;
9027         struct btrfs_fs_info *info = root->fs_info;
9028         struct btrfs_space_info *space_info;
9029         struct btrfs_key key;
9030         struct btrfs_key found_key;
9031         struct extent_buffer *leaf;
9032         int need_clear = 0;
9033         u64 cache_gen;
9034
9035         root = info->extent_root;
9036         key.objectid = 0;
9037         key.offset = 0;
9038         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9039         path = btrfs_alloc_path();
9040         if (!path)
9041                 return -ENOMEM;
9042         path->reada = 1;
9043
9044         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9045         if (btrfs_test_opt(root, SPACE_CACHE) &&
9046             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9047                 need_clear = 1;
9048         if (btrfs_test_opt(root, CLEAR_CACHE))
9049                 need_clear = 1;
9050
9051         while (1) {
9052                 ret = find_first_block_group(root, path, &key);
9053                 if (ret > 0)
9054                         break;
9055                 if (ret != 0)
9056                         goto error;
9057
9058                 leaf = path->nodes[0];
9059                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9060
9061                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9062                                                        found_key.offset);
9063                 if (!cache) {
9064                         ret = -ENOMEM;
9065                         goto error;
9066                 }
9067
9068                 if (need_clear) {
9069                         /*
9070                          * When we mount with an old space cache, we need to
9071                          * set BTRFS_DC_CLEAR and set the dirty flag.
9072                          *
9073                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9074                          *    truncate the old free space cache inode and
9075                          *    set up a new one.
9076                          * b) Setting the 'dirty flag' makes sure that we
9077                          *    flush the new space cache info onto disk.
9078                          */
9079                         if (btrfs_test_opt(root, SPACE_CACHE))
9080                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9081                 }
9082
9083                 read_extent_buffer(leaf, &cache->item,
9084                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9085                                    sizeof(cache->item));
9086                 cache->flags = btrfs_block_group_flags(&cache->item);
9087
9088                 key.objectid = found_key.objectid + found_key.offset;
9089                 btrfs_release_path(path);
9090
9091                 /*
9092                  * We need to exclude the super stripes now so that the space
9093                  * info has super bytes accounted for, otherwise we'll think
9094                  * we have more space than we actually do.
9095                  */
9096                 ret = exclude_super_stripes(root, cache);
9097                 if (ret) {
9098                         /*
9099                          * We may have excluded something, so call this just in
9100                          * case.
9101                          */
9102                         free_excluded_extents(root, cache);
9103                         btrfs_put_block_group(cache);
9104                         goto error;
9105                 }
9106
9107                 /*
9108                  * check for two cases, either we are full, and therefore
9109                  * don't need to bother with the caching work since we won't
9110                  * find any space, or we are empty, and we can just add all
9111                  * the space in and be done with it.  This saves us a lot of
9112                  * time, particularly in the full case.
9113                  */
9114                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9115                         cache->last_byte_to_unpin = (u64)-1;
9116                         cache->cached = BTRFS_CACHE_FINISHED;
9117                         free_excluded_extents(root, cache);
9118                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9119                         cache->last_byte_to_unpin = (u64)-1;
9120                         cache->cached = BTRFS_CACHE_FINISHED;
9121                         add_new_free_space(cache, root->fs_info,
9122                                            found_key.objectid,
9123                                            found_key.objectid +
9124                                            found_key.offset);
9125                         free_excluded_extents(root, cache);
9126                 }
9127
9128                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9129                 if (ret) {
9130                         btrfs_remove_free_space_cache(cache);
9131                         btrfs_put_block_group(cache);
9132                         goto error;
9133                 }
9134
9135                 ret = update_space_info(info, cache->flags, found_key.offset,
9136                                         btrfs_block_group_used(&cache->item),
9137                                         &space_info);
9138                 if (ret) {
9139                         btrfs_remove_free_space_cache(cache);
9140                         spin_lock(&info->block_group_cache_lock);
9141                         rb_erase(&cache->cache_node,
9142                                  &info->block_group_cache_tree);
9143                         RB_CLEAR_NODE(&cache->cache_node);
9144                         spin_unlock(&info->block_group_cache_lock);
9145                         btrfs_put_block_group(cache);
9146                         goto error;
9147                 }
9148
9149                 cache->space_info = space_info;
9150                 spin_lock(&cache->space_info->lock);
9151                 cache->space_info->bytes_readonly += cache->bytes_super;
9152                 spin_unlock(&cache->space_info->lock);
9153
9154                 __link_block_group(space_info, cache);
9155
9156                 set_avail_alloc_bits(root->fs_info, cache->flags);
9157                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9158                         set_block_group_ro(cache, 1);
9159                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9160                         spin_lock(&info->unused_bgs_lock);
9161                         /* Should always be true but just in case. */
9162                         if (list_empty(&cache->bg_list)) {
9163                                 btrfs_get_block_group(cache);
9164                                 list_add_tail(&cache->bg_list,
9165                                               &info->unused_bgs);
9166                         }
9167                         spin_unlock(&info->unused_bgs_lock);
9168                 }
9169         }
9170
9171         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9172                 if (!(get_alloc_profile(root, space_info->flags) &
9173                       (BTRFS_BLOCK_GROUP_RAID10 |
9174                        BTRFS_BLOCK_GROUP_RAID1 |
9175                        BTRFS_BLOCK_GROUP_RAID5 |
9176                        BTRFS_BLOCK_GROUP_RAID6 |
9177                        BTRFS_BLOCK_GROUP_DUP)))
9178                         continue;
9179                 /*
9180                  * avoid allocating from un-mirrored block group if there are
9181                  * mirrored block groups.
9182                  */
9183                 list_for_each_entry(cache,
9184                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9185                                 list)
9186                         set_block_group_ro(cache, 1);
9187                 list_for_each_entry(cache,
9188                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9189                                 list)
9190                         set_block_group_ro(cache, 1);
9191         }
9192
9193         init_global_block_rsv(info);
9194         ret = 0;
9195 error:
9196         btrfs_free_path(path);
9197         return ret;
9198 }
9199
9200 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9201                                        struct btrfs_root *root)
9202 {
9203         struct btrfs_block_group_cache *block_group, *tmp;
9204         struct btrfs_root *extent_root = root->fs_info->extent_root;
9205         struct btrfs_block_group_item item;
9206         struct btrfs_key key;
9207         int ret = 0;
9208
9209         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9210                 if (ret)
9211                         goto next;
9212
9213                 spin_lock(&block_group->lock);
9214                 memcpy(&item, &block_group->item, sizeof(item));
9215                 memcpy(&key, &block_group->key, sizeof(key));
9216                 spin_unlock(&block_group->lock);
9217
9218                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9219                                         sizeof(item));
9220                 if (ret)
9221                         btrfs_abort_transaction(trans, extent_root, ret);
9222                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9223                                                key.objectid, key.offset);
9224                 if (ret)
9225                         btrfs_abort_transaction(trans, extent_root, ret);
9226 next:
9227                 list_del_init(&block_group->bg_list);
9228         }
9229 }
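
/*
 * trans->new_bgs is populated by btrfs_make_block_group() below; the
 * actual block group items and chunk items are only inserted here, when
 * the transaction is being wrapped up, which keeps the hot allocation
 * path from touching the extent tree.  Note the list is always drained
 * via the 'next' label, even after an aborted transaction.
 */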
9230
9231 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9232                            struct btrfs_root *root, u64 bytes_used,
9233                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9234                            u64 size)
9235 {
9236         int ret;
9237         struct btrfs_root *extent_root;
9238         struct btrfs_block_group_cache *cache;
9239
9240         extent_root = root->fs_info->extent_root;
9241
9242         btrfs_set_log_full_commit(root->fs_info, trans);
9243
9244         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9245         if (!cache)
9246                 return -ENOMEM;
9247
9248         btrfs_set_block_group_used(&cache->item, bytes_used);
9249         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9250         btrfs_set_block_group_flags(&cache->item, type);
9251
9252         cache->flags = type;
9253         cache->last_byte_to_unpin = (u64)-1;
9254         cache->cached = BTRFS_CACHE_FINISHED;
9255         ret = exclude_super_stripes(root, cache);
9256         if (ret) {
9257                 /*
9258                  * We may have excluded something, so call this just in
9259                  * case.
9260                  */
9261                 free_excluded_extents(root, cache);
9262                 btrfs_put_block_group(cache);
9263                 return ret;
9264         }
9265
9266         add_new_free_space(cache, root->fs_info, chunk_offset,
9267                            chunk_offset + size);
9268
9269         free_excluded_extents(root, cache);
9270
9271         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9272         if (ret) {
9273                 btrfs_remove_free_space_cache(cache);
9274                 btrfs_put_block_group(cache);
9275                 return ret;
9276         }
9277
9278         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9279                                 &cache->space_info);
9280         if (ret) {
9281                 btrfs_remove_free_space_cache(cache);
9282                 spin_lock(&root->fs_info->block_group_cache_lock);
9283                 rb_erase(&cache->cache_node,
9284                          &root->fs_info->block_group_cache_tree);
9285                 RB_CLEAR_NODE(&cache->cache_node);
9286                 spin_unlock(&root->fs_info->block_group_cache_lock);
9287                 btrfs_put_block_group(cache);
9288                 return ret;
9289         }
9290         update_global_block_rsv(root->fs_info);
9291
9292         spin_lock(&cache->space_info->lock);
9293         cache->space_info->bytes_readonly += cache->bytes_super;
9294         spin_unlock(&cache->space_info->lock);
9295
9296         __link_block_group(cache->space_info, cache);
9297
9298         list_add_tail(&cache->bg_list, &trans->new_bgs);
9299
9300         set_avail_alloc_bits(extent_root->fs_info, type);
9301
9302         return 0;
9303 }
9304
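     /*
      * Clear the extended profile bits of @flags from the per-type available
      * allocation bits, under the profiles seqlock.
      */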
9305 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9306 {
9307         u64 extra_flags = chunk_to_extended(flags) &
9308                                 BTRFS_EXTENDED_PROFILE_MASK;
9309
9310         write_seqlock(&fs_info->profiles_lock);
9311         if (flags & BTRFS_BLOCK_GROUP_DATA)
9312                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9313         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9314                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9315         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9316                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9317         write_sequnlock(&fs_info->profiles_lock);
9318 }
9319
9320 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9321                              struct btrfs_root *root, u64 group_start,
9322                              struct extent_map *em)
9323 {
9324         struct btrfs_path *path;
9325         struct btrfs_block_group_cache *block_group;
9326         struct btrfs_free_cluster *cluster;
9327         struct btrfs_root *tree_root = root->fs_info->tree_root;
9328         struct btrfs_key key;
9329         struct inode *inode;
9330         struct kobject *kobj = NULL;
9331         int ret;
9332         int index;
9333         int factor;
9334         struct btrfs_caching_control *caching_ctl = NULL;
9335         bool remove_em;
9336
9337         root = root->fs_info->extent_root;
9338
9339         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9340         BUG_ON(!block_group);
9341         BUG_ON(!block_group->ro);
9342
9343         /*
9344          * Free the reserved super bytes from this block group before
9345          * removing it.
9346          */
9347         free_excluded_extents(root, block_group);
9348
9349         memcpy(&key, &block_group->key, sizeof(key));
9350         index = get_block_group_index(block_group);
9351         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9352                                   BTRFS_BLOCK_GROUP_RAID1 |
9353                                   BTRFS_BLOCK_GROUP_RAID10))
9354                 factor = 2;
9355         else
9356                 factor = 1;
9357
9358         /* make sure this block group isn't part of a data allocation cluster */
9359         cluster = &root->fs_info->data_alloc_cluster;
9360         spin_lock(&cluster->refill_lock);
9361         btrfs_return_cluster_to_free_space(block_group, cluster);
9362         spin_unlock(&cluster->refill_lock);
9363
9364         /*
9365          * make sure this block group isn't part of a metadata
9366          * allocation cluster
9367          */
9368         cluster = &root->fs_info->meta_alloc_cluster;
9369         spin_lock(&cluster->refill_lock);
9370         btrfs_return_cluster_to_free_space(block_group, cluster);
9371         spin_unlock(&cluster->refill_lock);
9372
9373         path = btrfs_alloc_path();
9374         if (!path) {
9375                 ret = -ENOMEM;
9376                 goto out;
9377         }
9378
9379         inode = lookup_free_space_inode(tree_root, block_group, path);
9380         if (!IS_ERR(inode)) {
9381                 ret = btrfs_orphan_add(trans, inode);
9382                 if (ret) {
9383                         btrfs_add_delayed_iput(inode);
9384                         goto out;
9385                 }
9386                 clear_nlink(inode);
9387                 /* One for the block group's ref on the inode */
9388                 spin_lock(&block_group->lock);
9389                 if (block_group->iref) {
9390                         block_group->iref = 0;
9391                         block_group->inode = NULL;
9392                         spin_unlock(&block_group->lock);
9393                         iput(inode);
9394                 } else {
9395                         spin_unlock(&block_group->lock);
9396                 }
9397                 /* One for our lookup ref */
9398                 btrfs_add_delayed_iput(inode);
9399         }
9400
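             /*
              * Delete the free space cache item for this block group from the
              * tree root, if one exists.
              */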
9401         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9402         key.offset = block_group->key.objectid;
9403         key.type = 0;
9404
9405         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9406         if (ret < 0)
9407                 goto out;
9408         if (ret > 0)
9409                 btrfs_release_path(path);
9410         if (ret == 0) {
9411                 ret = btrfs_del_item(trans, tree_root, path);
9412                 if (ret)
9413                         goto out;
9414                 btrfs_release_path(path);
9415         }
9416
9417         spin_lock(&root->fs_info->block_group_cache_lock);
9418         rb_erase(&block_group->cache_node,
9419                  &root->fs_info->block_group_cache_tree);
9420         RB_CLEAR_NODE(&block_group->cache_node);
9421
9422         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9423                 root->fs_info->first_logical_byte = (u64)-1;
9424         spin_unlock(&root->fs_info->block_group_cache_lock);
9425
9426         down_write(&block_group->space_info->groups_sem);
9427         /*
9428          * We must use list_del_init so others can check whether they
9429          * are still on the list after taking the semaphore.
9430          */
9431         list_del_init(&block_group->list);
9432         if (list_empty(&block_group->space_info->block_groups[index])) {
9433                 kobj = block_group->space_info->block_group_kobjs[index];
9434                 block_group->space_info->block_group_kobjs[index] = NULL;
9435                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9436         }
9437         up_write(&block_group->space_info->groups_sem);
9438         if (kobj) {
9439                 kobject_del(kobj);
9440                 kobject_put(kobj);
9441         }
9442
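             /*
              * If caching of this block group was in progress, wait for it to
              * finish, then unlink the caching control from the fs_info list
              * and drop both references to it.
              */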
9443         if (block_group->has_caching_ctl)
9444                 caching_ctl = get_caching_control(block_group);
9445         if (block_group->cached == BTRFS_CACHE_STARTED)
9446                 wait_block_group_cache_done(block_group);
9447         if (block_group->has_caching_ctl) {
9448                 down_write(&root->fs_info->commit_root_sem);
9449                 if (!caching_ctl) {
9450                         struct btrfs_caching_control *ctl;
9451
9452                         list_for_each_entry(ctl,
9453                                     &root->fs_info->caching_block_groups, list)
9454                                 if (ctl->block_group == block_group) {
9455                                         caching_ctl = ctl;
9456                                         atomic_inc(&caching_ctl->count);
9457                                         break;
9458                                 }
9459                 }
9460                 if (caching_ctl)
9461                         list_del_init(&caching_ctl->list);
9462                 up_write(&root->fs_info->commit_root_sem);
9463                 if (caching_ctl) {
9464                         /* Once for the caching bgs list and once for us. */
9465                         put_caching_control(caching_ctl);
9466                         put_caching_control(caching_ctl);
9467                 }
9468         }
9469
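             /*
              * If the block group is still on the transaction's dirty list,
              * unlink it and drop the reference that the list held.
              */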
9470         spin_lock(&trans->transaction->dirty_bgs_lock);
9471         if (!list_empty(&block_group->dirty_list)) {
9472                 list_del_init(&block_group->dirty_list);
9473                 btrfs_put_block_group(block_group);
9474         }
9475         spin_unlock(&trans->transaction->dirty_bgs_lock);
9476
9477         btrfs_remove_free_space_cache(block_group);
9478
9479         spin_lock(&block_group->space_info->lock);
9480         list_del_init(&block_group->ro_list);
9481         block_group->space_info->total_bytes -= block_group->key.offset;
9482         block_group->space_info->bytes_readonly -= block_group->key.offset;
9483         block_group->space_info->disk_total -= block_group->key.offset * factor;
9484         spin_unlock(&block_group->space_info->lock);
9485
9486         memcpy(&key, &block_group->key, sizeof(key));
9487
9488         lock_chunks(root);
9489         if (!list_empty(&em->list)) {
9490                 /* We're in the transaction->pending_chunks list. */
9491                 free_extent_map(em);
9492         }
9493         spin_lock(&block_group->lock);
9494         block_group->removed = 1;
9495         /*
9496          * At this point trimming can't start on this block group, because we
9497          * removed the block group from the tree fs_info->block_group_cache_tree
9498          * so no one can find it anymore, and even if someone already got this
9499          * block group before we removed it from the rbtree, they have already
9500          * incremented block_group->trimming - if they didn't, they won't find
9501          * any free space entries, because we already removed them all when we
9502          * called btrfs_remove_free_space_cache().
9503          *
9504          * And we must not remove the extent map from the fs_info->mapping_tree
9505          * to prevent the same logical address range and physical device space
9506          * ranges from being reused for a new block group. This is because our
9507          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9508          * completely transactionless, so while it is trimming a range the
9509          * currently running transaction might finish and a new one start,
9510          * allowing for new block groups to be created that can reuse the same
9511          * physical device locations unless we take this special care.
9512          */
9513         remove_em = (atomic_read(&block_group->trimming) == 0);
9514         /*
9515          * Make sure a trimmer task always sees the em in the pinned_chunks list
9516          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9517          * before checking block_group->removed).
9518          */
9519         if (!remove_em) {
9520                 /*
9521                  * Our em might be in trans->transaction->pending_chunks which
9522                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9523                  * and so is the fs_info->pinned_chunks list.
9524                  *
9525                  * So at this point we must be holding the chunk_mutex to avoid
9526                  * any races with chunk allocation (more specifically at
9527                  * volumes.c:contains_pending_extent()), to ensure it always
9528                  * sees the em, either in the pending_chunks list or in the
9529                  * pinned_chunks list.
9530                  */
9531                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9532         }
9533         spin_unlock(&block_group->lock);
9534
9535         if (remove_em) {
9536                 struct extent_map_tree *em_tree;
9537
9538                 em_tree = &root->fs_info->mapping_tree.map_tree;
9539                 write_lock(&em_tree->lock);
9540                 /*
9541                  * The em might be in the pending_chunks list, so make sure the
9542                  * chunk mutex is locked, since remove_extent_mapping() will
9543                  * delete us from that list.
9544                  */
9545                 remove_extent_mapping(em_tree, em);
9546                 write_unlock(&em_tree->lock);
9547                 /* once for the tree */
9548                 free_extent_map(em);
9549         }
9550
9551         unlock_chunks(root);
9552
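             /*
              * Once for the btrfs_lookup_block_group() reference and once for
              * the reference held by the block group cache rbtree.
              */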
9553         btrfs_put_block_group(block_group);
9554         btrfs_put_block_group(block_group);
9555
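             /*
              * Finally delete the block group item itself from the extent
              * root.  A return value of 1 from btrfs_search_slot() means the
              * item is missing, which we treat as -EIO.
              */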
9556         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9557         if (ret > 0)
9558                 ret = -EIO;
9559         if (ret < 0)
9560                 goto out;
9561
9562         ret = btrfs_del_item(trans, root, path);
9563 out:
9564         btrfs_free_path(path);
9565         return ret;
9566 }
9567
9568 /*
9569  * Process the unused_bgs list and remove any that don't have any allocated
9570  * space inside of them.
9571  */
9572 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9573 {
9574         struct btrfs_block_group_cache *block_group;
9575         struct btrfs_space_info *space_info;
9576         struct btrfs_root *root = fs_info->extent_root;
9577         struct btrfs_trans_handle *trans;
9578         int ret = 0;
9579
9580         if (!fs_info->open)
9581                 return;
9582
9583         spin_lock(&fs_info->unused_bgs_lock);
9584         while (!list_empty(&fs_info->unused_bgs)) {
9585                 u64 start, end;
9586
9587                 block_group = list_first_entry(&fs_info->unused_bgs,
9588                                                struct btrfs_block_group_cache,
9589                                                bg_list);
9590                 space_info = block_group->space_info;
9591                 list_del_init(&block_group->bg_list);
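                     /*
                      * After a failed iteration just drain the rest of the
                      * list, and always skip block groups that belong to a
                      * mixed space info.
                      */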
9592                 if (ret || btrfs_mixed_space_info(space_info)) {
9593                         btrfs_put_block_group(block_group);
9594                         continue;
9595                 }
9596                 spin_unlock(&fs_info->unused_bgs_lock);
9597
9598                 /* Don't want to race with allocators so take the groups_sem */
9599                 down_write(&space_info->groups_sem);
9600                 spin_lock(&block_group->lock);
9601                 if (block_group->reserved ||
9602                     btrfs_block_group_used(&block_group->item) ||
9603                     block_group->ro) {
9604                         /*
9605                          * We want to bail if we made new allocations or have
9606                          * outstanding allocations in this block group.  We do
9607                          * the ro check in case balance is currently acting on
9608                          * this block group.
9609                          */
9610                         spin_unlock(&block_group->lock);
9611                         up_write(&space_info->groups_sem);
9612                         goto next;
9613                 }
9614                 spin_unlock(&block_group->lock);
9615
9616                 /* We don't want to force the issue, only flip if it's ok. */
9617                 ret = set_block_group_ro(block_group, 0);
9618                 up_write(&space_info->groups_sem);
9619                 if (ret < 0) {
9620                         ret = 0;
9621                         goto next;
9622                 }
9623
9624                 /*
9625                  * Want to do this before we do anything else so we can recover
9626                  * properly if we fail to join the transaction.
9627                  */
9628                 /* 1 for btrfs_orphan_reserve_metadata() */
9629                 trans = btrfs_start_transaction(root, 1);
9630                 if (IS_ERR(trans)) {
9631                         btrfs_set_block_group_rw(root, block_group);
9632                         ret = PTR_ERR(trans);
9633                         goto next;
9634                 }
9635
9636                 /*
9637                  * We could have pending pinned extents for this block group,
9638                  * just delete them, we don't care about them anymore.
9639                  */
9640                 start = block_group->key.objectid;
9641                 end = start + block_group->key.offset - 1;
9642                 /*
9643                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
9644                  * btrfs_finish_extent_commit(). If we are at transaction N,
9645                  * another task might be running finish_extent_commit() for the
9646                  * previous transaction N - 1, and have seen a range belonging
9647                  * to the block group in freed_extents[] before we were able to
9648                  * clear the whole block group range from freed_extents[]. This
9649          * means that task could look up the block group after we
9650                  * unpinned it from freed_extents[] and removed it, leading to
9651                  * a BUG_ON() at btrfs_unpin_extent_range().
9652                  */
9653                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9654                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9655                                   EXTENT_DIRTY, GFP_NOFS);
9656                 if (ret) {
9657                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9658                         btrfs_set_block_group_rw(root, block_group);
9659                         goto end_trans;
9660                 }
9661                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9662                                   EXTENT_DIRTY, GFP_NOFS);
9663                 if (ret) {
9664                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9665                         btrfs_set_block_group_rw(root, block_group);
9666                         goto end_trans;
9667                 }
9668                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9669
9670                 /* Reset pinned so btrfs_put_block_group() doesn't complain */
9671                 block_group->pinned = 0;
9672
9673                 /*
9674                  * btrfs_remove_chunk() will abort the transaction if things go
9675                  * horribly wrong.
9676                  */
9677                 ret = btrfs_remove_chunk(trans, root,
9678                                          block_group->key.objectid);
9679 end_trans:
9680                 btrfs_end_transaction(trans, root);
9681 next:
9682                 btrfs_put_block_group(block_group);
9683                 spin_lock(&fs_info->unused_bgs_lock);
9684         }
9685         spin_unlock(&fs_info->unused_bgs_lock);
9686 }
9687
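     /*
      * Create the initial space infos at mount time: SYSTEM, plus either a
      * mixed DATA|METADATA space info or separate METADATA and DATA ones,
      * depending on the MIXED_GROUPS incompat feature.
      */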
9688 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9689 {
9690         struct btrfs_space_info *space_info;
9691         struct btrfs_super_block *disk_super;
9692         u64 features;
9693         u64 flags;
9694         int mixed = 0;
9695         int ret;
9696
9697         disk_super = fs_info->super_copy;
9698         if (!btrfs_super_root(disk_super))
9699                 return 1;
9700
9701         features = btrfs_super_incompat_flags(disk_super);
9702         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9703                 mixed = 1;
9704
9705         flags = BTRFS_BLOCK_GROUP_SYSTEM;
9706         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9707         if (ret)
9708                 goto out;
9709
9710         if (mixed) {
9711                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9712                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9713         } else {
9714                 flags = BTRFS_BLOCK_GROUP_METADATA;
9715                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9716                 if (ret)
9717                         goto out;
9718
9719                 flags = BTRFS_BLOCK_GROUP_DATA;
9720                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9721         }
9722 out:
9723         return ret;
9724 }
9725
9726 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9727 {
9728         return unpin_extent_range(root, start, end, false);
9729 }
9730
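     /*
      * Walk all block groups overlapping the requested range, trim their free
      * space, and return the total number of bytes trimmed in range->len.
      */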
9731 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9732 {
9733         struct btrfs_fs_info *fs_info = root->fs_info;
9734         struct btrfs_block_group_cache *cache = NULL;
9735         u64 group_trimmed;
9736         u64 start;
9737         u64 end;
9738         u64 trimmed = 0;
9739         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9740         int ret = 0;
9741
9742         /*
9743          * Try to trim all FS space; our block group may start at a non-zero offset.
9744          */
9745         if (range->len == total_bytes)
9746                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9747         else
9748                 cache = btrfs_lookup_block_group(fs_info, range->start);
9749
9750         while (cache) {
9751                 if (cache->key.objectid >= (range->start + range->len)) {
9752                         btrfs_put_block_group(cache);
9753                         break;
9754                 }
9755
9756                 start = max(range->start, cache->key.objectid);
9757                 end = min(range->start + range->len,
9758                                 cache->key.objectid + cache->key.offset);
9759
9760                 if (end - start >= range->minlen) {
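                             /*
                              * Make sure the free space of this block group is
                              * fully cached before trimming it.
                              */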
9761                         if (!block_group_cache_done(cache)) {
9762                                 ret = cache_block_group(cache, 0);
9763                                 if (ret) {
9764                                         btrfs_put_block_group(cache);
9765                                         break;
9766                                 }
9767                                 ret = wait_block_group_cache_done(cache);
9768                                 if (ret) {
9769                                         btrfs_put_block_group(cache);
9770                                         break;
9771                                 }
9772                         }
9773                         ret = btrfs_trim_block_group(cache,
9774                                                      &group_trimmed,
9775                                                      start,
9776                                                      end,
9777                                                      range->minlen);
9778
9779                         trimmed += group_trimmed;
9780                         if (ret) {
9781                                 btrfs_put_block_group(cache);
9782                                 break;
9783                         }
9784                 }
9785
9786                 cache = next_block_group(fs_info->tree_root, cache);
9787         }
9788
9789         range->len = trimmed;
9790         return ret;
9791 }
9792
9793 /*
9794  * btrfs_{start,end}_write_no_snapshoting() are similar to
9795  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
9796  * data into the page cache through nocow before the subvolume is snapshotted
9797  * and then flushing it to disk only after the snapshot is created, and to
9798  * prevent operations that would make the snapshot inconsistent while
9799  * snapshotting is ongoing (writes followed by expanding truncates, for example).
9800  */
9801 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
9802 {
9803         percpu_counter_dec(&root->subv_writers->counter);
9804         /*
9805          * Make sure counter is updated before we wake up
9806          * waiters.
9807          */
9808         smp_mb();
9809         if (waitqueue_active(&root->subv_writers->wait))
9810                 wake_up(&root->subv_writers->wait);
9811 }
9812
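     /*
      * Returns 0 if the subvolume is about to be snapshotted, in which case
      * the caller must not write through nocow, and 1 after the writers
      * counter has been taken.  A sketch of the intended caller pattern:
      *
      *     if (!btrfs_start_write_no_snapshoting(root))
      *             fall back to a cow write;
      *     write the data through nocow;
      *     btrfs_end_write_no_snapshoting(root);
      */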
9813 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
9814 {
9815         if (atomic_read(&root->will_be_snapshoted))
9816                 return 0;
9817
9818         percpu_counter_inc(&root->subv_writers->counter);
9819         /*
9820          * Make sure counter is updated before we check for snapshot creation.
9821          */
9822         smp_mb();
9823         if (atomic_read(&root->will_be_snapshoted)) {
9824                 btrfs_end_write_no_snapshoting(root);
9825                 return 0;
9826         }
9827         return 1;
9828 }