fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

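/*
 * Account for the superblock mirrors that live inside this block group:
 * their ranges are added to freed_extents[] and to cache->bytes_super so
 * they are never handed out as free space.
 */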
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for anything that can't be used
 * yet, because its free space will only be released when the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

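/*
 * Worker that populates a block group's free space by walking the extent
 * tree in the commit root: every gap between allocated extents inside the
 * block group is added as free space via add_new_free_space().
 */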
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->commit_root_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

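/*
 * Kick off caching for a block group: try the fast path (loading the
 * on-disk free space cache) first, and otherwise queue caching_thread()
 * to build the free space data from the extent tree.
 */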
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish; otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching; set cached to the
		 * appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store extent flags to set.  This way you can check to see what
 * the reference count and extent flags would be once all of the delayed
 * refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

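/*
 * Illustrative example (not from the original source): a data extent at
 * logical byte X, referenced implicitly by root 5, inode 257, file offset 0,
 * is keyed as
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full back ref from a parent tree block at logical byte P is
 * keyed as
 *
 *     (X, BTRFS_SHARED_DATA_REF_KEY, P)
 *
 * Both forms can be seen being built in lookup_extent_data_ref() below.
 */
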
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

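/*
 * A sketch of the offset produced above (illustrative): the crc32c of the
 * root objectid occupies bits 31-62 and the combined crc32c of owner and
 * offset occupies bits 0-31, overlapping at bit 31 via the XOR:
 *
 *     offset = ((u64)crc32c(root) << 31) ^ (u64)crc32c(owner, offset)
 *
 * Different (root, owner, offset) triples can still collide; see
 * insert_extent_data_ref() below, which probes forward on -EEXIST.
 */
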
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

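/*
 * Find the data ref item for the given extent: a shared ref is looked up
 * directly by (bytenr, parent), while an implicit ref is found by hashing
 * (root, owner, offset) and then scanning forward past any hash collisions.
 */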
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

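/*
 * Insert (or bump the count of) a data ref item.  For implicit refs the
 * key offset is a hash, so on -EEXIST we either merge with a matching ref
 * or keep incrementing the offset until a free slot is found.
 */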
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

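/*
 * Drop refs_to_drop references from a data ref item, deleting the item
 * entirely (and reporting it via *last_ref) once its count reaches zero.
 */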
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

1361 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1362                                           struct btrfs_root *root,
1363                                           struct btrfs_path *path,
1364                                           u64 bytenr, u64 parent,
1365                                           u64 root_objectid)
1366 {
1367         struct btrfs_key key;
1368         int ret;
1369
1370         key.objectid = bytenr;
1371         if (parent) {
1372                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1373                 key.offset = parent;
1374         } else {
1375                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1376                 key.offset = root_objectid;
1377         }
1378
1379         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1380         if (ret > 0)
1381                 ret = -ENOENT;
1382 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1383         if (ret == -ENOENT && parent) {
1384                 btrfs_release_path(path);
1385                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1386                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1387                 if (ret > 0)
1388                         ret = -ENOENT;
1389         }
1390 #endif
1391         return ret;
1392 }
1393
1394 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1395                                           struct btrfs_root *root,
1396                                           struct btrfs_path *path,
1397                                           u64 bytenr, u64 parent,
1398                                           u64 root_objectid)
1399 {
1400         struct btrfs_key key;
1401         int ret;
1402
1403         key.objectid = bytenr;
1404         if (parent) {
1405                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1406                 key.offset = parent;
1407         } else {
1408                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1409                 key.offset = root_objectid;
1410         }
1411
1412         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1413         btrfs_release_path(path);
1414         return ret;
1415 }
1416
1417 static inline int extent_ref_type(u64 parent, u64 owner)
1418 {
1419         int type;
1420         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1421                 if (parent > 0)
1422                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1423                 else
1424                         type = BTRFS_TREE_BLOCK_REF_KEY;
1425         } else {
1426                 if (parent > 0)
1427                         type = BTRFS_SHARED_DATA_REF_KEY;
1428                 else
1429                         type = BTRFS_EXTENT_DATA_REF_KEY;
1430         }
1431         return type;
1432 }
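
/*
 * The mapping above, in table form; "shared" refs are keyed by the parent
 * block rather than the owning root:
 *
 *                                     parent == 0             parent > 0
 *   metadata (owner < FIRST_FREE)     TREE_BLOCK_REF          SHARED_BLOCK_REF
 *   data                              EXTENT_DATA_REF         SHARED_DATA_REF
 *
 * e.g. extent_ref_type(0, 0) describes a level-0 tree block referenced
 * directly by its root, so it yields BTRFS_TREE_BLOCK_REF_KEY.
 */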
1433
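/*
 * Walk up the path looking for the first level whose slot is not the last
 * one; return that level's next key in *key and 0, or 1 if the path already
 * points at the last key in the tree.
 */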
1434 static int find_next_key(struct btrfs_path *path, int level,
1435                          struct btrfs_key *key)
1436 {
1437
1438         for (; level < BTRFS_MAX_LEVEL; level++) {
1439                 if (!path->nodes[level])
1440                         break;
1441                 if (path->slots[level] + 1 >=
1442                     btrfs_header_nritems(path->nodes[level]))
1443                         continue;
1444                 if (level == 0)
1445                         btrfs_item_key_to_cpu(path->nodes[level], key,
1446                                               path->slots[level] + 1);
1447                 else
1448                         btrfs_node_key_to_cpu(path->nodes[level], key,
1449                                               path->slots[level] + 1);
1450                 return 0;
1451         }
1452         return 1;
1453 }
1454
1455 /*
1456  * look for inline back ref. if back ref is found, *ref_ret is set
1457  * to the address of inline back ref, and 0 is returned.
1458  *
1459  * if back ref isn't found, *ref_ret is set to the address where it
1460  * should be inserted, and -ENOENT is returned.
1461  *
1462  * if insert is true and there are too many inline back refs, the path
1463  * points to the extent item, and -EAGAIN is returned.
1464  *
1465  * NOTE: inline back refs are ordered in the same way that back ref
1466  *       items in the tree are ordered.
1467  */
1468 static noinline_for_stack
1469 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1470                                  struct btrfs_root *root,
1471                                  struct btrfs_path *path,
1472                                  struct btrfs_extent_inline_ref **ref_ret,
1473                                  u64 bytenr, u64 num_bytes,
1474                                  u64 parent, u64 root_objectid,
1475                                  u64 owner, u64 offset, int insert)
1476 {
1477         struct btrfs_key key;
1478         struct extent_buffer *leaf;
1479         struct btrfs_extent_item *ei;
1480         struct btrfs_extent_inline_ref *iref;
1481         u64 flags;
1482         u64 item_size;
1483         unsigned long ptr;
1484         unsigned long end;
1485         int extra_size;
1486         int type;
1487         int want;
1488         int ret;
1489         int err = 0;
1490         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1491                                                  SKINNY_METADATA);
1492
1493         key.objectid = bytenr;
1494         key.type = BTRFS_EXTENT_ITEM_KEY;
1495         key.offset = num_bytes;
1496
1497         want = extent_ref_type(parent, owner);
1498         if (insert) {
1499                 extra_size = btrfs_extent_inline_ref_size(want);
1500                 path->keep_locks = 1;
1501         } else
1502                 extra_size = -1;
1503
1504         /*
1505          * For metadata, owner is the level of the block, so it can be used
1506          * directly as the offset of the skinny metadata key.
1507          */
1508         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1509                 key.type = BTRFS_METADATA_ITEM_KEY;
1510                 key.offset = owner;
1511         }
1512
1513 again:
1514         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1515         if (ret < 0) {
1516                 err = ret;
1517                 goto out;
1518         }
1519
1520         /*
1521          * We may be a newly converted file system that still has the old fat
1522          * extent entries for metadata, so check whether we have one of those.
1523          */
1524         if (ret > 0 && skinny_metadata) {
1525                 skinny_metadata = false;
1526                 if (path->slots[0]) {
1527                         path->slots[0]--;
1528                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1529                                               path->slots[0]);
1530                         if (key.objectid == bytenr &&
1531                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1532                             key.offset == num_bytes)
1533                                 ret = 0;
1534                 }
1535                 if (ret) {
1536                         key.objectid = bytenr;
1537                         key.type = BTRFS_EXTENT_ITEM_KEY;
1538                         key.offset = num_bytes;
1539                         btrfs_release_path(path);
1540                         goto again;
1541                 }
1542         }
1543
1544         if (ret && !insert) {
1545                 err = -ENOENT;
1546                 goto out;
1547         } else if (WARN_ON(ret)) {
1548                 err = -EIO;
1549                 goto out;
1550         }
1551
1552         leaf = path->nodes[0];
1553         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1554 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1555         if (item_size < sizeof(*ei)) {
1556                 if (!insert) {
1557                         err = -ENOENT;
1558                         goto out;
1559                 }
1560                 ret = convert_extent_item_v0(trans, root, path, owner,
1561                                              extra_size);
1562                 if (ret < 0) {
1563                         err = ret;
1564                         goto out;
1565                 }
1566                 leaf = path->nodes[0];
1567                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1568         }
1569 #endif
1570         BUG_ON(item_size < sizeof(*ei));
1571
1572         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1573         flags = btrfs_extent_flags(leaf, ei);
1574
1575         ptr = (unsigned long)(ei + 1);
1576         end = (unsigned long)ei + item_size;
1577
1578         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1579                 ptr += sizeof(struct btrfs_tree_block_info);
1580                 BUG_ON(ptr > end);
1581         }
1582
1583         err = -ENOENT;
1584         while (1) {
1585                 if (ptr >= end) {
1586                         WARN_ON(ptr > end);
1587                         break;
1588                 }
1589                 iref = (struct btrfs_extent_inline_ref *)ptr;
1590                 type = btrfs_extent_inline_ref_type(leaf, iref);
1591                 if (want < type)
1592                         break;
1593                 if (want > type) {
1594                         ptr += btrfs_extent_inline_ref_size(type);
1595                         continue;
1596                 }
1597
1598                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1599                         struct btrfs_extent_data_ref *dref;
1600                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1601                         if (match_extent_data_ref(leaf, dref, root_objectid,
1602                                                   owner, offset)) {
1603                                 err = 0;
1604                                 break;
1605                         }
1606                         if (hash_extent_data_ref_item(leaf, dref) <
1607                             hash_extent_data_ref(root_objectid, owner, offset))
1608                                 break;
1609                 } else {
1610                         u64 ref_offset;
1611                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1612                         if (parent > 0) {
1613                                 if (parent == ref_offset) {
1614                                         err = 0;
1615                                         break;
1616                                 }
1617                                 if (ref_offset < parent)
1618                                         break;
1619                         } else {
1620                                 if (root_objectid == ref_offset) {
1621                                         err = 0;
1622                                         break;
1623                                 }
1624                                 if (ref_offset < root_objectid)
1625                                         break;
1626                         }
1627                 }
1628                 ptr += btrfs_extent_inline_ref_size(type);
1629         }
1630         if (err == -ENOENT && insert) {
1631                 if (item_size + extra_size >=
1632                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1633                         err = -EAGAIN;
1634                         goto out;
1635                 }
1636                 /*
1637                  * To add a new inline back ref, we have to make sure
1638                  * there is no corresponding back ref item.
1639                  * For simplicity, we just do not add a new inline back
1640                  * ref if there is any kind of item for this block.
1641                  */
1642                 if (find_next_key(path, 0, &key) == 0 &&
1643                     key.objectid == bytenr &&
1644                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1645                         err = -EAGAIN;
1646                         goto out;
1647                 }
1648         }
1649         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1650 out:
1651         if (insert) {
1652                 path->keep_locks = 0;
1653                 btrfs_unlock_up_safe(path, 1);
1654         }
1655         return err;
1656 }
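
/*
 * The canonical consumer of the 0 / -ENOENT / -EAGAIN contract above is
 * insert_inline_extent_backref() below: 0 means the existing inline ref at
 * *ref_ret is updated in place, -ENOENT means a new inline ref is set up at
 * *ref_ret, and -EAGAIN makes callers such as __btrfs_inc_extent_ref() fall
 * back to a separate keyed backref item.
 */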
1657
1658 /*
1659  * helper to add a new inline back ref
1660  */
1661 static noinline_for_stack
1662 void setup_inline_extent_backref(struct btrfs_root *root,
1663                                  struct btrfs_path *path,
1664                                  struct btrfs_extent_inline_ref *iref,
1665                                  u64 parent, u64 root_objectid,
1666                                  u64 owner, u64 offset, int refs_to_add,
1667                                  struct btrfs_delayed_extent_op *extent_op)
1668 {
1669         struct extent_buffer *leaf;
1670         struct btrfs_extent_item *ei;
1671         unsigned long ptr;
1672         unsigned long end;
1673         unsigned long item_offset;
1674         u64 refs;
1675         int size;
1676         int type;
1677
1678         leaf = path->nodes[0];
1679         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1680         item_offset = (unsigned long)iref - (unsigned long)ei;
1681
1682         type = extent_ref_type(parent, owner);
1683         size = btrfs_extent_inline_ref_size(type);
1684
1685         btrfs_extend_item(root, path, size);
1686
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         refs = btrfs_extent_refs(leaf, ei);
1689         refs += refs_to_add;
1690         btrfs_set_extent_refs(leaf, ei, refs);
1691         if (extent_op)
1692                 __run_delayed_extent_op(extent_op, leaf, ei);
1693
1694         ptr = (unsigned long)ei + item_offset;
1695         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1696         if (ptr < end - size)
1697                 memmove_extent_buffer(leaf, ptr + size, ptr,
1698                                       end - size - ptr);
1699
1700         iref = (struct btrfs_extent_inline_ref *)ptr;
1701         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1702         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1703                 struct btrfs_extent_data_ref *dref;
1704                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1705                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1706                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1707                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1708                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1709         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710                 struct btrfs_shared_data_ref *sref;
1711                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1712                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1713                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1714         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1715                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1716         } else {
1717                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1718         }
1719         btrfs_mark_buffer_dirty(leaf);
1720 }
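
/*
 * The memmove above opens the gap exactly at the offset that
 * lookup_inline_extent_backref() reported via *ref_ret, so the inline refs
 * keep the same ordering as the corresponding keyed backref items.
 */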
1721
1722 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1723                                  struct btrfs_root *root,
1724                                  struct btrfs_path *path,
1725                                  struct btrfs_extent_inline_ref **ref_ret,
1726                                  u64 bytenr, u64 num_bytes, u64 parent,
1727                                  u64 root_objectid, u64 owner, u64 offset)
1728 {
1729         int ret;
1730
1731         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1732                                            bytenr, num_bytes, parent,
1733                                            root_objectid, owner, offset, 0);
1734         if (ret != -ENOENT)
1735                 return ret;
1736
1737         btrfs_release_path(path);
1738         *ref_ret = NULL;
1739
1740         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1741                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1742                                             root_objectid);
1743         } else {
1744                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1745                                              root_objectid, owner, offset);
1746         }
1747         return ret;
1748 }
1749
1750 /*
1751  * helper to update/remove an inline back ref
1752  */
1753 static noinline_for_stack
1754 void update_inline_extent_backref(struct btrfs_root *root,
1755                                   struct btrfs_path *path,
1756                                   struct btrfs_extent_inline_ref *iref,
1757                                   int refs_to_mod,
1758                                   struct btrfs_delayed_extent_op *extent_op,
1759                                   int *last_ref)
1760 {
1761         struct extent_buffer *leaf;
1762         struct btrfs_extent_item *ei;
1763         struct btrfs_extent_data_ref *dref = NULL;
1764         struct btrfs_shared_data_ref *sref = NULL;
1765         unsigned long ptr;
1766         unsigned long end;
1767         u32 item_size;
1768         int size;
1769         int type;
1770         u64 refs;
1771
1772         leaf = path->nodes[0];
1773         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1774         refs = btrfs_extent_refs(leaf, ei);
1775         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1776         refs += refs_to_mod;
1777         btrfs_set_extent_refs(leaf, ei, refs);
1778         if (extent_op)
1779                 __run_delayed_extent_op(extent_op, leaf, ei);
1780
1781         type = btrfs_extent_inline_ref_type(leaf, iref);
1782
1783         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1784                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1785                 refs = btrfs_extent_data_ref_count(leaf, dref);
1786         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1787                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1788                 refs = btrfs_shared_data_ref_count(leaf, sref);
1789         } else {
1790                 refs = 1;
1791                 BUG_ON(refs_to_mod != -1);
1792         }
1793
1794         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1795         refs += refs_to_mod;
1796
1797         if (refs > 0) {
1798                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1799                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1800                 else
1801                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1802         } else {
1803                 *last_ref = 1;
1804                 size = btrfs_extent_inline_ref_size(type);
1805                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1806                 ptr = (unsigned long)iref;
1807                 end = (unsigned long)ei + item_size;
1808                 if (ptr + size < end)
1809                         memmove_extent_buffer(leaf, ptr, ptr + size,
1810                                               end - ptr - size);
1811                 item_size -= size;
1812                 btrfs_truncate_item(root, path, item_size, 1);
1813         }
1814         btrfs_mark_buffer_dirty(leaf);
1815 }
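
/*
 * When the count drops to zero the inline ref is memmove'd out and the
 * extent item truncated rather than deleted; *last_ref tells the caller the
 * reference was removed entirely instead of merely decremented.
 */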
1816
1817 static noinline_for_stack
1818 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1819                                  struct btrfs_root *root,
1820                                  struct btrfs_path *path,
1821                                  u64 bytenr, u64 num_bytes, u64 parent,
1822                                  u64 root_objectid, u64 owner,
1823                                  u64 offset, int refs_to_add,
1824                                  struct btrfs_delayed_extent_op *extent_op)
1825 {
1826         struct btrfs_extent_inline_ref *iref;
1827         int ret;
1828
1829         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1830                                            bytenr, num_bytes, parent,
1831                                            root_objectid, owner, offset, 1);
1832         if (ret == 0) {
1833                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1834                 update_inline_extent_backref(root, path, iref,
1835                                              refs_to_add, extent_op, NULL);
1836         } else if (ret == -ENOENT) {
1837                 setup_inline_extent_backref(root, path, iref, parent,
1838                                             root_objectid, owner, offset,
1839                                             refs_to_add, extent_op);
1840                 ret = 0;
1841         }
1842         return ret;
1843 }
1844
1845 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1846                                  struct btrfs_root *root,
1847                                  struct btrfs_path *path,
1848                                  u64 bytenr, u64 parent, u64 root_objectid,
1849                                  u64 owner, u64 offset, int refs_to_add)
1850 {
1851         int ret;
1852         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1853                 BUG_ON(refs_to_add != 1);
1854                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1855                                             parent, root_objectid);
1856         } else {
1857                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1858                                              parent, root_objectid,
1859                                              owner, offset, refs_to_add);
1860         }
1861         return ret;
1862 }
1863
1864 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1865                                  struct btrfs_root *root,
1866                                  struct btrfs_path *path,
1867                                  struct btrfs_extent_inline_ref *iref,
1868                                  int refs_to_drop, int is_data, int *last_ref)
1869 {
1870         int ret = 0;
1871
1872         BUG_ON(!is_data && refs_to_drop != 1);
1873         if (iref) {
1874                 update_inline_extent_backref(root, path, iref,
1875                                              -refs_to_drop, NULL, last_ref);
1876         } else if (is_data) {
1877                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1878                                              last_ref);
1879         } else {
1880                 *last_ref = 1;
1881                 ret = btrfs_del_item(trans, root, path);
1882         }
1883         return ret;
1884 }
1885
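/*
 * blkdev_issue_discard() works in 512-byte sector units, hence the ">> 9"
 * conversions from byte offsets and lengths below.
 */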
1886 static int btrfs_issue_discard(struct block_device *bdev,
1887                                 u64 start, u64 len)
1888 {
1889         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1890 }
1891
1892 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1893                          u64 num_bytes, u64 *actual_bytes)
1894 {
1895         int ret;
1896         u64 discarded_bytes = 0;
1897         struct btrfs_bio *bbio = NULL;
1898
1899
1900         /* Tell the block device(s) that the sectors can be discarded */
1901         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1902                               bytenr, &num_bytes, &bbio, 0);
1903         /* Error condition is -ENOMEM */
1904         if (!ret) {
1905                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1906                 int i;
1907
1908
1909                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1910                         if (!stripe->dev->can_discard)
1911                                 continue;
1912
1913                         ret = btrfs_issue_discard(stripe->dev->bdev,
1914                                                   stripe->physical,
1915                                                   stripe->length);
1916                         if (!ret)
1917                                 discarded_bytes += stripe->length;
1918                         else if (ret != -EOPNOTSUPP)
1919                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1920
1921                         /*
1922                          * Just in case we get back EOPNOTSUPP for some reason,
1923                          * ignore the return value so we don't screw up
1924                          * people calling discard_extent.
1925                          */
1926                         ret = 0;
1927                 }
1928                 btrfs_put_bbio(bbio);
1929         }
1930
1931         if (actual_bytes)
1932                 *actual_bytes = discarded_bytes;
1933
1934
1935         if (ret == -EOPNOTSUPP)
1936                 ret = 0;
1937         return ret;
1938 }
1939
1940 /* Can return -ENOMEM */
1941 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1942                          struct btrfs_root *root,
1943                          u64 bytenr, u64 num_bytes, u64 parent,
1944                          u64 root_objectid, u64 owner, u64 offset,
1945                          int no_quota)
1946 {
1947         int ret;
1948         struct btrfs_fs_info *fs_info = root->fs_info;
1949
1950         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1951                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1952
1953         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1954                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1955                                         num_bytes,
1956                                         parent, root_objectid, (int)owner,
1957                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1958         } else {
1959                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1960                                         num_bytes,
1961                                         parent, root_objectid, owner, offset,
1962                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1963         }
1964         return ret;
1965 }
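
/*
 * Note on the owner argument: for tree blocks it is the block's level,
 * which is why the (int)owner cast above feeds the level parameter of
 * btrfs_add_delayed_tree_ref(); for data extents it is the owning inode
 * number, with offset giving the file offset of the extent.
 */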
1966
1967 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1968                                   struct btrfs_root *root,
1969                                   struct btrfs_delayed_ref_node *node,
1970                                   u64 parent, u64 root_objectid,
1971                                   u64 owner, u64 offset, int refs_to_add,
1972                                   struct btrfs_delayed_extent_op *extent_op)
1973 {
1974         struct btrfs_fs_info *fs_info = root->fs_info;
1975         struct btrfs_path *path;
1976         struct extent_buffer *leaf;
1977         struct btrfs_extent_item *item;
1978         struct btrfs_key key;
1979         u64 bytenr = node->bytenr;
1980         u64 num_bytes = node->num_bytes;
1981         u64 refs;
1982         int ret;
1983         int no_quota = node->no_quota;
1984
1985         path = btrfs_alloc_path();
1986         if (!path)
1987                 return -ENOMEM;
1988
1989         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1990                 no_quota = 1;
1991
1992         path->reada = 1;
1993         path->leave_spinning = 1;
1994         /* this will set up the path even if it fails to insert the back ref */
1995         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1996                                            bytenr, num_bytes, parent,
1997                                            root_objectid, owner, offset,
1998                                            refs_to_add, extent_op);
1999         if ((ret < 0 && ret != -EAGAIN) || !ret)
2000                 goto out;
2001
2002         /*
2003          * OK, we had -EAGAIN, which means we didn't have space to insert an
2004          * inline extent ref, so just update the reference count and add a
2005          * normal backref.
2006          */
2007         leaf = path->nodes[0];
2008         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2009         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2010         refs = btrfs_extent_refs(leaf, item);
2011         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2012         if (extent_op)
2013                 __run_delayed_extent_op(extent_op, leaf, item);
2014
2015         btrfs_mark_buffer_dirty(leaf);
2016         btrfs_release_path(path);
2017
2018         path->reada = 1;
2019         path->leave_spinning = 1;
2020         /* now insert the actual backref */
2021         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2022                                     path, bytenr, parent, root_objectid,
2023                                     owner, offset, refs_to_add);
2024         if (ret)
2025                 btrfs_abort_transaction(trans, root, ret);
2026 out:
2027         btrfs_free_path(path);
2028         return ret;
2029 }
2030
2031 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2032                                 struct btrfs_root *root,
2033                                 struct btrfs_delayed_ref_node *node,
2034                                 struct btrfs_delayed_extent_op *extent_op,
2035                                 int insert_reserved)
2036 {
2037         int ret = 0;
2038         struct btrfs_delayed_data_ref *ref;
2039         struct btrfs_key ins;
2040         u64 parent = 0;
2041         u64 ref_root = 0;
2042         u64 flags = 0;
2043
2044         ins.objectid = node->bytenr;
2045         ins.offset = node->num_bytes;
2046         ins.type = BTRFS_EXTENT_ITEM_KEY;
2047
2048         ref = btrfs_delayed_node_to_data_ref(node);
2049         trace_run_delayed_data_ref(node, ref, node->action);
2050
2051         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2052                 parent = ref->parent;
2053         ref_root = ref->root;
2054
2055         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2056                 if (extent_op)
2057                         flags |= extent_op->flags_to_set;
2058                 ret = alloc_reserved_file_extent(trans, root,
2059                                                  parent, ref_root, flags,
2060                                                  ref->objectid, ref->offset,
2061                                                  &ins, node->ref_mod);
2062         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2063                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2064                                              ref_root, ref->objectid,
2065                                              ref->offset, node->ref_mod,
2066                                              extent_op);
2067         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2068                 ret = __btrfs_free_extent(trans, root, node, parent,
2069                                           ref_root, ref->objectid,
2070                                           ref->offset, node->ref_mod,
2071                                           extent_op);
2072         } else {
2073                 BUG();
2074         }
2075         return ret;
2076 }
2077
2078 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2079                                     struct extent_buffer *leaf,
2080                                     struct btrfs_extent_item *ei)
2081 {
2082         u64 flags = btrfs_extent_flags(leaf, ei);
2083         if (extent_op->update_flags) {
2084                 flags |= extent_op->flags_to_set;
2085                 btrfs_set_extent_flags(leaf, ei, flags);
2086         }
2087
2088         if (extent_op->update_key) {
2089                 struct btrfs_tree_block_info *bi;
2090                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2091                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2092                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2093         }
2094 }
2095
2096 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2097                                  struct btrfs_root *root,
2098                                  struct btrfs_delayed_ref_node *node,
2099                                  struct btrfs_delayed_extent_op *extent_op)
2100 {
2101         struct btrfs_key key;
2102         struct btrfs_path *path;
2103         struct btrfs_extent_item *ei;
2104         struct extent_buffer *leaf;
2105         u32 item_size;
2106         int ret;
2107         int err = 0;
2108         int metadata = !extent_op->is_data;
2109
2110         if (trans->aborted)
2111                 return 0;
2112
2113         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2114                 metadata = 0;
2115
2116         path = btrfs_alloc_path();
2117         if (!path)
2118                 return -ENOMEM;
2119
2120         key.objectid = node->bytenr;
2121
2122         if (metadata) {
2123                 key.type = BTRFS_METADATA_ITEM_KEY;
2124                 key.offset = extent_op->level;
2125         } else {
2126                 key.type = BTRFS_EXTENT_ITEM_KEY;
2127                 key.offset = node->num_bytes;
2128         }
2129
2130 again:
2131         path->reada = 1;
2132         path->leave_spinning = 1;
2133         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2134                                 path, 0, 1);
2135         if (ret < 0) {
2136                 err = ret;
2137                 goto out;
2138         }
2139         if (ret > 0) {
2140                 if (metadata) {
2141                         if (path->slots[0] > 0) {
2142                                 path->slots[0]--;
2143                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2144                                                       path->slots[0]);
2145                                 if (key.objectid == node->bytenr &&
2146                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2147                                     key.offset == node->num_bytes)
2148                                         ret = 0;
2149                         }
2150                         if (ret > 0) {
2151                                 btrfs_release_path(path);
2152                                 metadata = 0;
2153
2154                                 key.objectid = node->bytenr;
2155                                 key.offset = node->num_bytes;
2156                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2157                                 goto again;
2158                         }
2159                 } else {
2160                         err = -EIO;
2161                         goto out;
2162                 }
2163         }
2164
2165         leaf = path->nodes[0];
2166         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2167 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2168         if (item_size < sizeof(*ei)) {
2169                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2170                                              path, (u64)-1, 0);
2171                 if (ret < 0) {
2172                         err = ret;
2173                         goto out;
2174                 }
2175                 leaf = path->nodes[0];
2176                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2177         }
2178 #endif
2179         BUG_ON(item_size < sizeof(*ei));
2180         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2181         __run_delayed_extent_op(extent_op, leaf, ei);
2182
2183         btrfs_mark_buffer_dirty(leaf);
2184 out:
2185         btrfs_free_path(path);
2186         return err;
2187 }
2188
2189 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2190                                 struct btrfs_root *root,
2191                                 struct btrfs_delayed_ref_node *node,
2192                                 struct btrfs_delayed_extent_op *extent_op,
2193                                 int insert_reserved)
2194 {
2195         int ret = 0;
2196         struct btrfs_delayed_tree_ref *ref;
2197         struct btrfs_key ins;
2198         u64 parent = 0;
2199         u64 ref_root = 0;
2200         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2201                                                  SKINNY_METADATA);
2202
2203         ref = btrfs_delayed_node_to_tree_ref(node);
2204         trace_run_delayed_tree_ref(node, ref, node->action);
2205
2206         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2207                 parent = ref->parent;
2208         ref_root = ref->root;
2209
2210         ins.objectid = node->bytenr;
2211         if (skinny_metadata) {
2212                 ins.offset = ref->level;
2213                 ins.type = BTRFS_METADATA_ITEM_KEY;
2214         } else {
2215                 ins.offset = node->num_bytes;
2216                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2217         }
2218
2219         BUG_ON(node->ref_mod != 1);
2220         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2221                 BUG_ON(!extent_op || !extent_op->update_flags);
2222                 ret = alloc_reserved_tree_block(trans, root,
2223                                                 parent, ref_root,
2224                                                 extent_op->flags_to_set,
2225                                                 &extent_op->key,
2226                                                 ref->level, &ins,
2227                                                 node->no_quota);
2228         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2229                 ret = __btrfs_inc_extent_ref(trans, root, node,
2230                                              parent, ref_root,
2231                                              ref->level, 0, 1,
2232                                              extent_op);
2233         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2234                 ret = __btrfs_free_extent(trans, root, node,
2235                                           parent, ref_root,
2236                                           ref->level, 0, 1, extent_op);
2237         } else {
2238                 BUG();
2239         }
2240         return ret;
2241 }
2242
2243 /* helper function to actually process a single delayed ref entry */
2244 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2245                                struct btrfs_root *root,
2246                                struct btrfs_delayed_ref_node *node,
2247                                struct btrfs_delayed_extent_op *extent_op,
2248                                int insert_reserved)
2249 {
2250         int ret = 0;
2251
2252         if (trans->aborted) {
2253                 if (insert_reserved)
2254                         btrfs_pin_extent(root, node->bytenr,
2255                                          node->num_bytes, 1);
2256                 return 0;
2257         }
2258
2259         if (btrfs_delayed_ref_is_head(node)) {
2260                 struct btrfs_delayed_ref_head *head;
2261                 /*
2262                  * we've hit the end of the chain and we were supposed
2263                  * to insert this extent into the tree.  But, it got
2264                  * deleted before we ever needed to insert it, so all
2265                  * we have to do is clean up the accounting
2266                  */
2267                 BUG_ON(extent_op);
2268                 head = btrfs_delayed_node_to_head(node);
2269                 trace_run_delayed_ref_head(node, head, node->action);
2270
2271                 if (insert_reserved) {
2272                         btrfs_pin_extent(root, node->bytenr,
2273                                          node->num_bytes, 1);
2274                         if (head->is_data) {
2275                                 ret = btrfs_del_csums(trans, root,
2276                                                       node->bytenr,
2277                                                       node->num_bytes);
2278                         }
2279                 }
2280                 return ret;
2281         }
2282
2283         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2284             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2285                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2286                                            insert_reserved);
2287         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2288                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2289                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2290                                            insert_reserved);
2291         else
2292                 BUG();
2293         return ret;
2294 }
2295
2296 static inline struct btrfs_delayed_ref_node *
2297 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2298 {
2299         struct btrfs_delayed_ref_node *ref;
2300
2301         if (list_empty(&head->ref_list))
2302                 return NULL;
2303
2304         /*
2305          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2306          * This is to prevent a ref count from going down to zero, which deletes
2307          * the extent item from the extent tree, when there still are references
2308          * to add, which would fail because they would not find the extent item.
2309          */
2310         list_for_each_entry(ref, &head->ref_list, list) {
2311                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2312                         return ref;
2313         }
2314
2315         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2316                           list);
2317 }
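
/*
 * e.g. with a pending list of [DROP, ADD] for one extent, the ADD is
 * returned first, so the count never transiently reaches zero and the
 * extent item is not deleted out from under the pending ADD.
 */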
2318
2319 /*
2320  * Returns 0 on success or if called with an already aborted transaction.
2321  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2322  */
2323 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2324                                              struct btrfs_root *root,
2325                                              unsigned long nr)
2326 {
2327         struct btrfs_delayed_ref_root *delayed_refs;
2328         struct btrfs_delayed_ref_node *ref;
2329         struct btrfs_delayed_ref_head *locked_ref = NULL;
2330         struct btrfs_delayed_extent_op *extent_op;
2331         struct btrfs_fs_info *fs_info = root->fs_info;
2332         ktime_t start = ktime_get();
2333         int ret;
2334         unsigned long count = 0;
2335         unsigned long actual_count = 0;
2336         int must_insert_reserved = 0;
2337
2338         delayed_refs = &trans->transaction->delayed_refs;
2339         while (1) {
2340                 if (!locked_ref) {
2341                         if (count >= nr)
2342                                 break;
2343
2344                         spin_lock(&delayed_refs->lock);
2345                         locked_ref = btrfs_select_ref_head(trans);
2346                         if (!locked_ref) {
2347                                 spin_unlock(&delayed_refs->lock);
2348                                 break;
2349                         }
2350
2351                         /* grab the lock that says we are going to process
2352                          * all the refs for this head */
2353                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2354                         spin_unlock(&delayed_refs->lock);
2355                         /*
2356                          * we may have dropped the spin lock to get the head
2357                          * mutex lock, and that might have given someone else
2358                          * time to free the head.  If that's true, it has been
2359                          * removed from our list and we can move on.
2360                          */
2361                         if (ret == -EAGAIN) {
2362                                 locked_ref = NULL;
2363                                 count++;
2364                                 continue;
2365                         }
2366                 }
2367
2368                 spin_lock(&locked_ref->lock);
2369
2370                 /*
2371                  * locked_ref is the head node, so we have to go one
2372                  * node back for any delayed ref updates
2373                  */
2374                 ref = select_delayed_ref(locked_ref);
2375
2376                 if (ref && ref->seq &&
2377                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2378                         spin_unlock(&locked_ref->lock);
2379                         btrfs_delayed_ref_unlock(locked_ref);
2380                         spin_lock(&delayed_refs->lock);
2381                         locked_ref->processing = 0;
2382                         delayed_refs->num_heads_ready++;
2383                         spin_unlock(&delayed_refs->lock);
2384                         locked_ref = NULL;
2385                         cond_resched();
2386                         count++;
2387                         continue;
2388                 }
2389
2390                 /*
2391                  * record the must insert reserved flag before we
2392                  * drop the spin lock.
2393                  */
2394                 must_insert_reserved = locked_ref->must_insert_reserved;
2395                 locked_ref->must_insert_reserved = 0;
2396
2397                 extent_op = locked_ref->extent_op;
2398                 locked_ref->extent_op = NULL;
2399
2400                 if (!ref) {
2401
2402                         /*
2403                          * All delayed refs have been processed, go ahead
2404                          * and send the head node to run_one_delayed_ref,
2405                          * so that any accounting fixes can happen.
2406                          */
2407                         ref = &locked_ref->node;
2408
2409                         if (extent_op && must_insert_reserved) {
2410                                 btrfs_free_delayed_extent_op(extent_op);
2411                                 extent_op = NULL;
2412                         }
2413
2414                         if (extent_op) {
2415                                 spin_unlock(&locked_ref->lock);
2416                                 ret = run_delayed_extent_op(trans, root,
2417                                                             ref, extent_op);
2418                                 btrfs_free_delayed_extent_op(extent_op);
2419
2420                                 if (ret) {
2421                                         /*
2422                                          * Need to reset must_insert_reserved if
2423                                          * there was an error so the abort stuff
2424                                          * can clean up the reserved space
2425                                          * properly.
2426                                          */
2427                                         if (must_insert_reserved)
2428                                                 locked_ref->must_insert_reserved = 1;
2429                                         locked_ref->processing = 0;
2430                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2431                                         btrfs_delayed_ref_unlock(locked_ref);
2432                                         return ret;
2433                                 }
2434                                 continue;
2435                         }
2436
2437                         /*
2438          * Need to drop our head ref lock and re-acquire the
2439                          * delayed ref lock and then re-check to make sure
2440                          * nobody got added.
2441                          */
2442                         spin_unlock(&locked_ref->lock);
2443                         spin_lock(&delayed_refs->lock);
2444                         spin_lock(&locked_ref->lock);
2445                         if (!list_empty(&locked_ref->ref_list) ||
2446                             locked_ref->extent_op) {
2447                                 spin_unlock(&locked_ref->lock);
2448                                 spin_unlock(&delayed_refs->lock);
2449                                 continue;
2450                         }
2451                         ref->in_tree = 0;
2452                         delayed_refs->num_heads--;
2453                         rb_erase(&locked_ref->href_node,
2454                                  &delayed_refs->href_root);
2455                         spin_unlock(&delayed_refs->lock);
2456                 } else {
2457                         actual_count++;
2458                         ref->in_tree = 0;
2459                         list_del(&ref->list);
2460                 }
2461                 atomic_dec(&delayed_refs->num_entries);
2462
2463                 if (!btrfs_delayed_ref_is_head(ref)) {
2464                         /*
2465                          * when we play the delayed ref, also correct the
2466                          * ref_mod on head
2467                          */
2468                         switch (ref->action) {
2469                         case BTRFS_ADD_DELAYED_REF:
2470                         case BTRFS_ADD_DELAYED_EXTENT:
2471                                 locked_ref->node.ref_mod -= ref->ref_mod;
2472                                 break;
2473                         case BTRFS_DROP_DELAYED_REF:
2474                                 locked_ref->node.ref_mod += ref->ref_mod;
2475                                 break;
2476                         default:
2477                                 WARN_ON(1);
2478                         }
2479                 }
2480                 spin_unlock(&locked_ref->lock);
2481
2482                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2483                                           must_insert_reserved);
2484
2485                 btrfs_free_delayed_extent_op(extent_op);
2486                 if (ret) {
2487                         locked_ref->processing = 0;
2488                         btrfs_delayed_ref_unlock(locked_ref);
2489                         btrfs_put_delayed_ref(ref);
2490                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2491                         return ret;
2492                 }
2493
2494                 /*
2495                  * If this node is a head, that means all the refs in this head
2496                  * have been dealt with, and we will pick the next head to deal
2497                  * with, so we must unlock the head and drop it from the cluster
2498                  * list before we release it.
2499                  */
2500                 if (btrfs_delayed_ref_is_head(ref)) {
2501                         if (locked_ref->is_data &&
2502                             locked_ref->total_ref_mod < 0) {
2503                                 spin_lock(&delayed_refs->lock);
2504                                 delayed_refs->pending_csums -= ref->num_bytes;
2505                                 spin_unlock(&delayed_refs->lock);
2506                         }
2507                         btrfs_delayed_ref_unlock(locked_ref);
2508                         locked_ref = NULL;
2509                 }
2510                 btrfs_put_delayed_ref(ref);
2511                 count++;
2512                 cond_resched();
2513         }
2514
2515         /*
2516          * We don't want to include ref heads since we can have empty ref heads
2517          * and those would drastically skew our runtime down, as we just do
2518          * accounting there, no actual extent tree updates.
2519          */
2520         if (actual_count > 0) {
2521                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2522                 u64 avg;
2523
2524                 /*
2525                  * We weigh the current average higher than our current runtime
2526                  * to avoid large swings in the average.
2527                  */
2528                 spin_lock(&delayed_refs->lock);
2529                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2530                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2531                 spin_unlock(&delayed_refs->lock);
2532         }
2533         return 0;
2534 }
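
/*
 * The running average above is an exponential moving average with a 1/4
 * weight on the new sample: avg_new = (3 * avg_old + runtime) / 4, so a
 * single slow batch nudges the estimate instead of swinging it.
 */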
2535
2536 #ifdef SCRAMBLE_DELAYED_REFS
2537 /*
2538  * Normally delayed refs get processed in ascending bytenr order. This
2539  * correlates in most cases with the order added. To expose dependencies on this
2540  * order, we start to process the tree in the middle instead of the beginning.
2541  */
2542 static u64 find_middle(struct rb_root *root)
2543 {
2544         struct rb_node *n = root->rb_node;
2545         struct btrfs_delayed_ref_node *entry;
2546         int alt = 1;
2547         u64 middle;
2548         u64 first = 0, last = 0;
2549
2550         n = rb_first(root);
2551         if (n) {
2552                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2553                 first = entry->bytenr;
2554         }
2555         n = rb_last(root);
2556         if (n) {
2557                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2558                 last = entry->bytenr;
2559         }
2560         n = root->rb_node;
2561
2562         while (n) {
2563                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2564                 WARN_ON(!entry->in_tree);
2565
2566                 middle = entry->bytenr;
2567
2568                 if (alt)
2569                         n = n->rb_left;
2570                 else
2571                         n = n->rb_right;
2572
2573                 alt = 1 - alt;
2574         }
2575         return middle;
2576 }
2577 #endif
2578
2579 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2580 {
2581         u64 num_bytes;
2582
2583         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2584                              sizeof(struct btrfs_extent_inline_ref));
2585         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2586                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2587
2588         /*
2589          * We don't ever fill up leaves all the way, so the caller doubles this
2590          * just to be closer to what we're really going to want to use.
2591          */
2592         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2593 }
2594
2595 /*
2596  * Takes the number of bytes to be csummed and figures out how many leaves it
2597  * would require to store the csums for that many bytes.
2598  */
2599 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2600 {
2601         u64 csum_size;
2602         u64 num_csums_per_leaf;
2603         u64 num_csums;
2604
2605         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2606         num_csums_per_leaf = div64_u64(csum_size,
2607                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2608         num_csums = div64_u64(csum_bytes, root->sectorsize);
2609         num_csums += num_csums_per_leaf - 1;
2610         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2611         return num_csums;
2612 }
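
/*
 * Rough numbers only, assuming 16K nodes, 4K sectors and 4-byte crc32c
 * csums: one leaf holds roughly 4000 csums, so it covers on the order of
 * 16MB worth of csummed data.
 */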
2613
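/*
 * Returns 1 when the global reserve looks too small to safely run the
 * outstanding delayed refs and dirty block groups, 0 otherwise.
 */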
2614 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2615                                        struct btrfs_root *root)
2616 {
2617         struct btrfs_block_rsv *global_rsv;
2618         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2619         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2620         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2621         u64 num_bytes, num_dirty_bgs_bytes;
2622         int ret = 0;
2623
2624         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2625         num_heads = heads_to_leaves(root, num_heads);
2626         if (num_heads > 1)
2627                 num_bytes += (num_heads - 1) * root->nodesize;
2628         num_bytes <<= 1;
2629         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2630         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2631                                                              num_dirty_bgs);
2632         global_rsv = &root->fs_info->global_block_rsv;
2633
2634         /*
2635          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2636          * wiggle room since running delayed refs can create more delayed refs.
2637          */
2638         if (global_rsv->space_info->full) {
2639                 num_dirty_bgs_bytes <<= 1;
2640                 num_bytes <<= 1;
2641         }
2642
2643         spin_lock(&global_rsv->lock);
2644         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2645                 ret = 1;
2646         spin_unlock(&global_rsv->lock);
2647         return ret;
2648 }
2649
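/*
 * Estimate how long running all queued delayed refs would take based on the
 * measured average runtime per ref.  Returns 1 if it is over a second, 2 if
 * it is over half a second, and otherwise falls back to the space check
 * above.
 */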
2650 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2651                                        struct btrfs_root *root)
2652 {
2653         struct btrfs_fs_info *fs_info = root->fs_info;
2654         u64 num_entries =
2655                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2656         u64 avg_runtime;
2657         u64 val;
2658
2659         smp_mb();
2660         avg_runtime = fs_info->avg_delayed_ref_runtime;
2661         val = num_entries * avg_runtime;
2662         if (val >= NSEC_PER_SEC)
2663                 return 1;
2664         if (val >= NSEC_PER_SEC / 2)
2665                 return 2;
2666
2667         return btrfs_check_space_for_delayed_refs(trans, root);
2668 }
2669
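/*
 * Context for running delayed refs from a worker thread.  With ->sync set,
 * the submitter waits on ->wait and frees the struct itself; otherwise the
 * worker frees it when it is done.
 */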
2670 struct async_delayed_refs {
2671         struct btrfs_root *root;
2672         int count;
2673         int error;
2674         int sync;
2675         struct completion wait;
2676         struct btrfs_work work;
2677 };
2678
2679 static void delayed_ref_async_start(struct btrfs_work *work)
2680 {
2681         struct async_delayed_refs *async;
2682         struct btrfs_trans_handle *trans;
2683         int ret;
2684
2685         async = container_of(work, struct async_delayed_refs, work);
2686
2687         trans = btrfs_join_transaction(async->root);
2688         if (IS_ERR(trans)) {
2689                 async->error = PTR_ERR(trans);
2690                 goto done;
2691         }
2692
2693         /*
2694          * trans->sync means that when we call end_transaction, we won't
2695          * wait on delayed refs
2696          */
2697         trans->sync = true;
2698         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2699         if (ret)
2700                 async->error = ret;
2701
2702         ret = btrfs_end_transaction(trans, async->root);
2703         if (ret && !async->error)
2704                 async->error = ret;
2705 done:
2706         if (async->sync)
2707                 complete(&async->wait);
2708         else
2709                 kfree(async);
2710 }
2711
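/*
 * Kick off delayed ref processing in a worker thread.  If @wait is set,
 * block until the worker has finished and return its error (if any);
 * otherwise return immediately after queueing the work.
 */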
2712 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2713                                  unsigned long count, int wait)
2714 {
2715         struct async_delayed_refs *async;
2716         int ret;
2717
2718         async = kmalloc(sizeof(*async), GFP_NOFS);
2719         if (!async)
2720                 return -ENOMEM;
2721
2722         async->root = root->fs_info->tree_root;
2723         async->count = count;
2724         async->error = 0;
2725         if (wait)
2726                 async->sync = 1;
2727         else
2728                 async->sync = 0;
2729         init_completion(&async->wait);
2730
2731         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2732                         delayed_ref_async_start, NULL, NULL);
2733
2734         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2735
2736         if (wait) {
2737                 wait_for_completion(&async->wait);
2738                 ret = async->error;
2739                 kfree(async);
2740                 return ret;
2741         }
2742         return 0;
2743 }
2744
2745 /*
2746  * this starts processing the delayed reference count updates and
2747  * extent insertions we have queued up so far.  count can be 0, which
2748  * means to process roughly twice the number of entries queued at the
2749  * start of the run (but not newly added ones), (unsigned long)-1 to
2750  * keep going until the tree is drained, or some target number to process.
2751  *
2752  * Returns 0 on success or if called with an aborted transaction
2753  * Returns <0 on error and aborts the transaction
2754  */
2755 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2756                            struct btrfs_root *root, unsigned long count)
2757 {
2758         struct rb_node *node;
2759         struct btrfs_delayed_ref_root *delayed_refs;
2760         struct btrfs_delayed_ref_head *head;
2761         int ret;
2762         int run_all = count == (unsigned long)-1;
2763
2764         /* We'll clean this up in btrfs_cleanup_transaction */
2765         if (trans->aborted)
2766                 return 0;
2767
2768         if (root == root->fs_info->extent_root)
2769                 root = root->fs_info->tree_root;
2770
2771         delayed_refs = &trans->transaction->delayed_refs;
2772         if (count == 0)
2773                 count = atomic_read(&delayed_refs->num_entries) * 2;
2774
2775 again:
2776 #ifdef SCRAMBLE_DELAYED_REFS
2777         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2778 #endif
2779         ret = __btrfs_run_delayed_refs(trans, root, count);
2780         if (ret < 0) {
2781                 btrfs_abort_transaction(trans, root, ret);
2782                 return ret;
2783         }
2784
2785         if (run_all) {
2786                 if (!list_empty(&trans->new_bgs))
2787                         btrfs_create_pending_block_groups(trans, root);
2788
2789                 spin_lock(&delayed_refs->lock);
2790                 node = rb_first(&delayed_refs->href_root);
2791                 if (!node) {
2792                         spin_unlock(&delayed_refs->lock);
2793                         goto out;
2794                 }
2795                 count = (unsigned long)-1;
2796
2797                 while (node) {
2798                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2799                                         href_node);
2800                         if (btrfs_delayed_ref_is_head(&head->node)) {
2801                                 struct btrfs_delayed_ref_node *ref;
2802
2803                                 ref = &head->node;
2804                                 atomic_inc(&ref->refs);
2805
2806                                 spin_unlock(&delayed_refs->lock);
2807                                 /*
2808                                  * Mutex was contended, block until it's
2809                                  * released and try again
2810                                  */
2811                                 mutex_lock(&head->mutex);
2812                                 mutex_unlock(&head->mutex);
2813
2814                                 btrfs_put_delayed_ref(ref);
2815                                 cond_resched();
2816                                 goto again;
2817                         } else {
2818                                 WARN_ON(1);
2819                         }
2820                         node = rb_next(node);
2821                 }
2822                 spin_unlock(&delayed_refs->lock);
2823                 cond_resched();
2824                 goto again;
2825         }
2826 out:
2827         assert_qgroups_uptodate(trans);
2828         return 0;
2829 }
2830
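/*
 * Queue a delayed extent op that sets @flags on the extent item for @bytenr
 * once delayed refs are run, instead of touching the extent tree directly.
 */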
2831 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2832                                 struct btrfs_root *root,
2833                                 u64 bytenr, u64 num_bytes, u64 flags,
2834                                 int level, int is_data)
2835 {
2836         struct btrfs_delayed_extent_op *extent_op;
2837         int ret;
2838
2839         extent_op = btrfs_alloc_delayed_extent_op();
2840         if (!extent_op)
2841                 return -ENOMEM;
2842
2843         extent_op->flags_to_set = flags;
2844         extent_op->update_flags = 1;
2845         extent_op->update_key = 0;
2846         extent_op->is_data = is_data ? 1 : 0;
2847         extent_op->level = level;
2848
2849         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2850                                           num_bytes, extent_op);
2851         if (ret)
2852                 btrfs_free_delayed_extent_op(extent_op);
2853         return ret;
2854 }
2855
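/*
 * Check the queued delayed refs for @bytenr: returns 1 if any of them could
 * create a reference other than the one from @objectid/@offset in this root
 * (i.e. a cross reference), 0 if not, and -EAGAIN if the head mutex was
 * contended and the caller should retry.
 */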
2856 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2857                                       struct btrfs_root *root,
2858                                       struct btrfs_path *path,
2859                                       u64 objectid, u64 offset, u64 bytenr)
2860 {
2861         struct btrfs_delayed_ref_head *head;
2862         struct btrfs_delayed_ref_node *ref;
2863         struct btrfs_delayed_data_ref *data_ref;
2864         struct btrfs_delayed_ref_root *delayed_refs;
2865         int ret = 0;
2866
2867         delayed_refs = &trans->transaction->delayed_refs;
2868         spin_lock(&delayed_refs->lock);
2869         head = btrfs_find_delayed_ref_head(trans, bytenr);
2870         if (!head) {
2871                 spin_unlock(&delayed_refs->lock);
2872                 return 0;
2873         }
2874
2875         if (!mutex_trylock(&head->mutex)) {
2876                 atomic_inc(&head->node.refs);
2877                 spin_unlock(&delayed_refs->lock);
2878
2879                 btrfs_release_path(path);
2880
2881                 /*
2882                  * Mutex was contended, block until it's released and let
2883                  * caller try again
2884                  */
2885                 mutex_lock(&head->mutex);
2886                 mutex_unlock(&head->mutex);
2887                 btrfs_put_delayed_ref(&head->node);
2888                 return -EAGAIN;
2889         }
2890         spin_unlock(&delayed_refs->lock);
2891
2892         spin_lock(&head->lock);
2893         list_for_each_entry(ref, &head->ref_list, list) {
2894                 /* If it's a shared ref we know a cross reference exists */
2895                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2896                         ret = 1;
2897                         break;
2898                 }
2899
2900                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2901
2902                 /*
2903                  * If our ref doesn't match the one we're currently looking at
2904                  * then we have a cross reference.
2905                  */
2906                 if (data_ref->root != root->root_key.objectid ||
2907                     data_ref->objectid != objectid ||
2908                     data_ref->offset != offset) {
2909                         ret = 1;
2910                         break;
2911                 }
2912         }
2913         spin_unlock(&head->lock);
2914         mutex_unlock(&head->mutex);
2915         return ret;
2916 }
2917
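/*
 * Like check_delayed_ref() but against the committed extent tree: returns 0
 * only if the extent item holds a single inline data ref that matches
 * @root/@objectid/@offset exactly and the extent is newer than the last
 * snapshot, 1 if a cross reference may exist, or -ENOENT/-errno otherwise.
 */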
2918 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2919                                         struct btrfs_root *root,
2920                                         struct btrfs_path *path,
2921                                         u64 objectid, u64 offset, u64 bytenr)
2922 {
2923         struct btrfs_root *extent_root = root->fs_info->extent_root;
2924         struct extent_buffer *leaf;
2925         struct btrfs_extent_data_ref *ref;
2926         struct btrfs_extent_inline_ref *iref;
2927         struct btrfs_extent_item *ei;
2928         struct btrfs_key key;
2929         u32 item_size;
2930         int ret;
2931
2932         key.objectid = bytenr;
2933         key.offset = (u64)-1;
2934         key.type = BTRFS_EXTENT_ITEM_KEY;
2935
2936         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2937         if (ret < 0)
2938                 goto out;
2939         BUG_ON(ret == 0); /* Corruption */
2940
2941         ret = -ENOENT;
2942         if (path->slots[0] == 0)
2943                 goto out;
2944
2945         path->slots[0]--;
2946         leaf = path->nodes[0];
2947         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2948
2949         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2950                 goto out;
2951
2952         ret = 1;
2953         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2954 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2955         if (item_size < sizeof(*ei)) {
2956                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2957                 goto out;
2958         }
2959 #endif
2960         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2961
2962         if (item_size != sizeof(*ei) +
2963             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2964                 goto out;
2965
2966         if (btrfs_extent_generation(leaf, ei) <=
2967             btrfs_root_last_snapshot(&root->root_item))
2968                 goto out;
2969
2970         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2971         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2972             BTRFS_EXTENT_DATA_REF_KEY)
2973                 goto out;
2974
2975         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2976         if (btrfs_extent_refs(leaf, ei) !=
2977             btrfs_extent_data_ref_count(leaf, ref) ||
2978             btrfs_extent_data_ref_root(leaf, ref) !=
2979             root->root_key.objectid ||
2980             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2981             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2982                 goto out;
2983
2984         ret = 0;
2985 out:
2986         return ret;
2987 }
2988
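/*
 * Returns 0 if @bytenr is referenced only by @objectid/@offset within @root,
 * 1 if a cross reference exists (or may exist), and a negative errno on
 * failure.
 */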
2989 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2990                           struct btrfs_root *root,
2991                           u64 objectid, u64 offset, u64 bytenr)
2992 {
2993         struct btrfs_path *path;
2994         int ret;
2995         int ret2;
2996
2997         path = btrfs_alloc_path();
2998         if (!path)
2999                 return -ENOMEM;
3000
3001         do {
3002                 ret = check_committed_ref(trans, root, path, objectid,
3003                                           offset, bytenr);
3004                 if (ret && ret != -ENOENT)
3005                         goto out;
3006
3007                 ret2 = check_delayed_ref(trans, root, path, objectid,
3008                                          offset, bytenr);
3009         } while (ret2 == -EAGAIN);
3010
3011         if (ret2 && ret2 != -ENOENT) {
3012                 ret = ret2;
3013                 goto out;
3014         }
3015
3016         if (ret != -ENOENT || ret2 != -ENOENT)
3017                 ret = 0;
3018 out:
3019         btrfs_free_path(path);
3020         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3021                 WARN_ON(ret > 0);
3022         return ret;
3023 }
3024
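/*
 * Add or drop (depending on @inc) one reference for every extent referenced
 * by @buf: the disk extents behind regular file extent items when @buf is a
 * leaf, or the child blocks it points to when it is a node.  Inline extents
 * and holes (disk_bytenr == 0) are skipped.
 */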
3025 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3026                            struct btrfs_root *root,
3027                            struct extent_buffer *buf,
3028                            int full_backref, int inc)
3029 {
3030         u64 bytenr;
3031         u64 num_bytes;
3032         u64 parent;
3033         u64 ref_root;
3034         u32 nritems;
3035         struct btrfs_key key;
3036         struct btrfs_file_extent_item *fi;
3037         int i;
3038         int level;
3039         int ret = 0;
3040         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3041                             u64, u64, u64, u64, u64, u64, int);
3042
3044         if (btrfs_test_is_dummy_root(root))
3045                 return 0;
3046
3047         ref_root = btrfs_header_owner(buf);
3048         nritems = btrfs_header_nritems(buf);
3049         level = btrfs_header_level(buf);
3050
3051         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3052                 return 0;
3053
3054         if (inc)
3055                 process_func = btrfs_inc_extent_ref;
3056         else
3057                 process_func = btrfs_free_extent;
3058
3059         if (full_backref)
3060                 parent = buf->start;
3061         else
3062                 parent = 0;
3063
3064         for (i = 0; i < nritems; i++) {
3065                 if (level == 0) {
3066                         btrfs_item_key_to_cpu(buf, &key, i);
3067                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3068                                 continue;
3069                         fi = btrfs_item_ptr(buf, i,
3070                                             struct btrfs_file_extent_item);
3071                         if (btrfs_file_extent_type(buf, fi) ==
3072                             BTRFS_FILE_EXTENT_INLINE)
3073                                 continue;
3074                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3075                         if (bytenr == 0)
3076                                 continue;
3077
3078                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3079                         key.offset -= btrfs_file_extent_offset(buf, fi);
3080                         ret = process_func(trans, root, bytenr, num_bytes,
3081                                            parent, ref_root, key.objectid,
3082                                            key.offset, 1);
3083                         if (ret)
3084                                 goto fail;
3085                 } else {
3086                         bytenr = btrfs_node_blockptr(buf, i);
3087                         num_bytes = root->nodesize;
3088                         ret = process_func(trans, root, bytenr, num_bytes,
3089                                            parent, ref_root, level - 1, 0,
3090                                            1);
3091                         if (ret)
3092                                 goto fail;
3093                 }
3094         }
3095         return 0;
3096 fail:
3097         return ret;
3098 }
3099
3100 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3101                   struct extent_buffer *buf, int full_backref)
3102 {
3103         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3104 }
3105
3106 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3107                   struct extent_buffer *buf, int full_backref)
3108 {
3109         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3110 }
3111
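/*
 * Find the block group item for @cache in the extent tree and copy the
 * current in-memory item into the leaf.  Returns -ENOENT if the item is
 * missing, which can happen for block groups created in this transaction
 * whose items are not in the extent tree yet.
 */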
3112 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3113                                  struct btrfs_root *root,
3114                                  struct btrfs_path *path,
3115                                  struct btrfs_block_group_cache *cache)
3116 {
3117         int ret;
3118         struct btrfs_root *extent_root = root->fs_info->extent_root;
3119         unsigned long bi;
3120         struct extent_buffer *leaf;
3121
3122         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3123         if (ret) {
3124                 if (ret > 0)
3125                         ret = -ENOENT;
3126                 goto fail;
3127         }
3128
3129         leaf = path->nodes[0];
3130         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3131         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3132         btrfs_mark_buffer_dirty(leaf);
3133 fail:
3134         btrfs_release_path(path);
3135         return ret;
3137 }
3138
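/*
 * Return the block group that follows @cache in bytenr order, dropping the
 * caller's reference on @cache and taking one on the returned group.  If
 * @cache was removed from the tree, fall back to a fresh lookup at the
 * first bytenr past the old group.
 */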
3139 static struct btrfs_block_group_cache *
3140 next_block_group(struct btrfs_root *root,
3141                  struct btrfs_block_group_cache *cache)
3142 {
3143         struct rb_node *node;
3144
3145         spin_lock(&root->fs_info->block_group_cache_lock);
3146
3147         /* If our block group was removed, we need a full search. */
3148         if (RB_EMPTY_NODE(&cache->cache_node)) {
3149                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3150
3151                 spin_unlock(&root->fs_info->block_group_cache_lock);
3152                 btrfs_put_block_group(cache);
3153                 cache = btrfs_lookup_first_block_group(root->fs_info,
3154                                                        next_bytenr);
3155                 return cache;
3156         }
3157         node = rb_next(&cache->cache_node);
3158         btrfs_put_block_group(cache);
3159         if (node) {
3160                 cache = rb_entry(node, struct btrfs_block_group_cache,
3161                                  cache_node);
3162                 btrfs_get_block_group(cache);
3163         } else
3164                 cache = NULL;
3165         spin_unlock(&root->fs_info->block_group_cache_lock);
3166         return cache;
3167 }
3168
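/*
 * Get the free space cache inode for @block_group ready for this
 * transaction: create it if missing, truncate any stale contents, and
 * preallocate room for the new cache, recording the resulting state in
 * ->disk_cache_state.
 */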
3169 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3170                             struct btrfs_trans_handle *trans,
3171                             struct btrfs_path *path)
3172 {
3173         struct btrfs_root *root = block_group->fs_info->tree_root;
3174         struct inode *inode = NULL;
3175         u64 alloc_hint = 0;
3176         int dcs = BTRFS_DC_ERROR;
3177         u64 num_pages = 0;
3178         int retries = 0;
3179         int ret = 0;
3180
3181         /*
3182          * If this block group is smaller than 100 megs, don't bother caching the
3183          * block group.
3184          */
3185         if (block_group->key.offset < (100 * 1024 * 1024)) {
3186                 spin_lock(&block_group->lock);
3187                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3188                 spin_unlock(&block_group->lock);
3189                 return 0;
3190         }
3191
3192         if (trans->aborted)
3193                 return 0;
3194 again:
3195         inode = lookup_free_space_inode(root, block_group, path);
3196         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3197                 ret = PTR_ERR(inode);
3198                 btrfs_release_path(path);
3199                 goto out;
3200         }
3201
3202         if (IS_ERR(inode)) {
3203                 BUG_ON(retries);
3204                 retries++;
3205
3206                 if (block_group->ro)
3207                         goto out_free;
3208
3209                 ret = create_free_space_inode(root, trans, block_group, path);
3210                 if (ret)
3211                         goto out_free;
3212                 goto again;
3213         }
3214
3215         /* We've already set up this transaction, go ahead and exit */
3216         if (block_group->cache_generation == trans->transid &&
3217             i_size_read(inode)) {
3218                 dcs = BTRFS_DC_SETUP;
3219                 goto out_put;
3220         }
3221
3222         /*
3223          * We want to set the generation to 0, that way if anything goes wrong
3224          * from here on out we know not to trust this cache when we load up next
3225          * time.
3226          */
3227         BTRFS_I(inode)->generation = 0;
3228         ret = btrfs_update_inode(trans, root, inode);
3229         if (ret) {
3230                 /*
3231                  * So theoretically we could recover from this, simply set the
3232                  * super cache generation to 0 so we know to invalidate the
3233                  * cache, but then we'd have to keep track of the block groups
3234                  * that fail this way so we know we _have_ to reset this cache
3235                  * before the next commit or risk reading stale cache.  So to
3236                  * limit our exposure to horrible edge cases, let's just abort the
3237                  * transaction; this only happens in really bad situations
3238                  * anyway.
3239                  */
3240                 btrfs_abort_transaction(trans, root, ret);
3241                 goto out_put;
3242         }
3243         WARN_ON(ret);
3244
3245         if (i_size_read(inode) > 0) {
3246                 ret = btrfs_check_trunc_cache_free_space(root,
3247                                         &root->fs_info->global_block_rsv);
3248                 if (ret)
3249                         goto out_put;
3250
3251                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3252                 if (ret)
3253                         goto out_put;
3254         }
3255
3256         spin_lock(&block_group->lock);
3257         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3258             !btrfs_test_opt(root, SPACE_CACHE)) {
3259                 /*
3260                  * don't bother trying to write stuff out _if_
3261                  * a) we're not cached,
3262                  * b) we're mounted with the nospace_cache option.
3263                  */
3264                 dcs = BTRFS_DC_WRITTEN;
3265                 spin_unlock(&block_group->lock);
3266                 goto out_put;
3267         }
3268         spin_unlock(&block_group->lock);
3269
3270         /*
3271          * Try to preallocate enough space based on how big the block group is.
3272          * Keep in mind this has to include any pinned space which could end up
3273          * taking up quite a bit since it's not folded into the other space
3274          * cache.
3275          */
3276         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3277         if (!num_pages)
3278                 num_pages = 1;
3279
3280         num_pages *= 16;
3281         num_pages *= PAGE_CACHE_SIZE;
3282
3283         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3284         if (ret)
3285                 goto out_put;
3286
3287         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3288                                               num_pages, num_pages,
3289                                               &alloc_hint);
3290         if (!ret)
3291                 dcs = BTRFS_DC_SETUP;
3292         btrfs_free_reserved_data_space(inode, num_pages);
3293
3294 out_put:
3295         iput(inode);
3296 out_free:
3297         btrfs_release_path(path);
3298 out:
3299         spin_lock(&block_group->lock);
3300         if (!ret && dcs == BTRFS_DC_SETUP)
3301                 block_group->cache_generation = trans->transid;
3302         block_group->disk_cache_state = dcs;
3303         spin_unlock(&block_group->lock);
3304
3305         return ret;
3306 }
3307
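/*
 * Run cache_save_setup() for every dirty block group whose free space cache
 * is still in the BTRFS_DC_CLEAR state, so their cache inodes are set up
 * before writeback starts.
 */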
3308 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3309                             struct btrfs_root *root)
3310 {
3311         struct btrfs_block_group_cache *cache, *tmp;
3312         struct btrfs_transaction *cur_trans = trans->transaction;
3313         struct btrfs_path *path;
3314
3315         if (list_empty(&cur_trans->dirty_bgs) ||
3316             !btrfs_test_opt(root, SPACE_CACHE))
3317                 return 0;
3318
3319         path = btrfs_alloc_path();
3320         if (!path)
3321                 return -ENOMEM;
3322
3323         /* Could add new block groups, use _safe just in case */
3324         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3325                                  dirty_list) {
3326                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3327                         cache_save_setup(cache, trans, path);
3328         }
3329
3330         btrfs_free_path(path);
3331         return 0;
3332 }
3333
3334 /*
3335  * transaction commit does final block group cache writeback during a
3336  * critical section where nothing is allowed to change the FS.  This is
3337  * required in order for the cache to actually match the block group,
3338  * but can introduce a lot of latency into the commit.
3339  *
3340  * So, btrfs_start_dirty_block_groups is here to kick off block group
3341  * cache IO.  There's a chance we'll have to redo some of it if the
3342  * block group changes again during the commit, but it greatly reduces
3343  * the commit latency by getting rid of the easy block groups while
3344  * we're still allowing others to join the commit.
3345  */
3346 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3347                                    struct btrfs_root *root)
3348 {
3349         struct btrfs_block_group_cache *cache;
3350         struct btrfs_transaction *cur_trans = trans->transaction;
3351         int ret = 0;
3352         int should_put;
3353         struct btrfs_path *path = NULL;
3354         LIST_HEAD(dirty);
3355         struct list_head *io = &cur_trans->io_bgs;
3356         int num_started = 0;
3357         int loops = 0;
3358
3359         spin_lock(&cur_trans->dirty_bgs_lock);
3360         if (list_empty(&cur_trans->dirty_bgs)) {
3361                 spin_unlock(&cur_trans->dirty_bgs_lock);
3362                 return 0;
3363         }
3364         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3365         spin_unlock(&cur_trans->dirty_bgs_lock);
3366
3367 again:
3368         /*
3369          * make sure all the block groups on our dirty list actually
3370          * exist
3371          */
3372         btrfs_create_pending_block_groups(trans, root);
3373
3374         if (!path) {
3375                 path = btrfs_alloc_path();
3376                 if (!path)
3377                         return -ENOMEM;
3378         }
3379
3380         /*
3381          * cache_write_mutex is here only to save us from balance or automatic
3382          * removal of empty block groups deleting this block group while we are
3383          * writing out the cache
3384          */
3385         mutex_lock(&trans->transaction->cache_write_mutex);
3386         while (!list_empty(&dirty)) {
3387                 cache = list_first_entry(&dirty,
3388                                          struct btrfs_block_group_cache,
3389                                          dirty_list);
3390                 /*
3391                  * this can happen if something re-dirties a block
3392                  * group that is already under IO.  Just wait for it to
3393                  * finish and then do it all again
3394                  */
3395                 if (!list_empty(&cache->io_list)) {
3396                         list_del_init(&cache->io_list);
3397                         btrfs_wait_cache_io(root, trans, cache,
3398                                             &cache->io_ctl, path,
3399                                             cache->key.objectid);
3400                         btrfs_put_block_group(cache);
3401                 }
3402
3404                 /*
3405                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3406                  * if it should update the cache_state.  Don't delete
3407                  * until after we wait.
3408                  *
3409                  * Since we're not running in the commit critical section
3410                  * we need the dirty_bgs_lock to protect from update_block_group
3411                  */
3412                 spin_lock(&cur_trans->dirty_bgs_lock);
3413                 list_del_init(&cache->dirty_list);
3414                 spin_unlock(&cur_trans->dirty_bgs_lock);
3415
3416                 should_put = 1;
3417
3418                 cache_save_setup(cache, trans, path);
3419
3420                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3421                         cache->io_ctl.inode = NULL;
3422                         ret = btrfs_write_out_cache(root, trans, cache, path);
3423                         if (ret == 0 && cache->io_ctl.inode) {
3424                                 num_started++;
3425                                 should_put = 0;
3426
3427                                 /*
3428                                  * the cache_write_mutex is protecting
3429                                  * the io_list
3430                                  */
3431                                 list_add_tail(&cache->io_list, io);
3432                         } else {
3433                                 /*
3434                                  * if we failed to write the cache, the
3435                                  * generation will be bad and life goes on
3436                                  */
3437                                 ret = 0;
3438                         }
3439                 }
3440                 if (!ret) {
3441                         ret = write_one_cache_group(trans, root, path, cache);
3442                         /*
3443                          * Our block group might still be attached to the list
3444                          * of new block groups in the transaction handle of some
3445                          * other task (struct btrfs_trans_handle->new_bgs). This
3446                          * means its block group item isn't yet in the extent
3447                          * tree. If this happens ignore the error, as we will
3448                          * try again later in the critical section of the
3449                          * transaction commit.
3450                          */
3451                         if (ret == -ENOENT) {
3452                                 ret = 0;
3453                                 spin_lock(&cur_trans->dirty_bgs_lock);
3454                                 if (list_empty(&cache->dirty_list)) {
3455                                         list_add_tail(&cache->dirty_list,
3456                                                       &cur_trans->dirty_bgs);
3457                                         btrfs_get_block_group(cache);
3458                                 }
3459                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3460                         } else if (ret) {
3461                                 btrfs_abort_transaction(trans, root, ret);
3462                         }
3463                 }
3464
3465                 /* if it's not on the io list, we need to put the block group */
3466                 if (should_put)
3467                         btrfs_put_block_group(cache);
3468
3469                 if (ret)
3470                         break;
3471
3472                 /*
3473                  * Avoid blocking other tasks for too long. It might even save
3474                  * us from writing caches for block groups that are going to be
3475                  * removed.
3476                  */
3477                 mutex_unlock(&trans->transaction->cache_write_mutex);
3478                 mutex_lock(&trans->transaction->cache_write_mutex);
3479         }
3480         mutex_unlock(&trans->transaction->cache_write_mutex);
3481
3482         /*
3483          * go through delayed refs for all the stuff we've just kicked off
3484          * and then loop back (just once)
3485          */
3486         ret = btrfs_run_delayed_refs(trans, root, 0);
3487         if (!ret && loops == 0) {
3488                 loops++;
3489                 spin_lock(&cur_trans->dirty_bgs_lock);
3490                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3491                 /*
3492                  * dirty_bgs_lock protects us from concurrent block group
3493                  * deletes too (not just cache_write_mutex).
3494                  */
3495                 if (!list_empty(&dirty)) {
3496                         spin_unlock(&cur_trans->dirty_bgs_lock);
3497                         goto again;
3498                 }
3499                 spin_unlock(&cur_trans->dirty_bgs_lock);
3500         }
3501
3502         btrfs_free_path(path);
3503         return ret;
3504 }
3505
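/*
 * Like btrfs_start_dirty_block_groups() but run during the commit critical
 * section, so no locking against concurrent dirtying is needed and every
 * remaining dirty block group is written out (and its cache IO waited on)
 * before returning.
 */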
3506 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3507                                    struct btrfs_root *root)
3508 {
3509         struct btrfs_block_group_cache *cache;
3510         struct btrfs_transaction *cur_trans = trans->transaction;
3511         int ret = 0;
3512         int should_put;
3513         struct btrfs_path *path;
3514         struct list_head *io = &cur_trans->io_bgs;
3515         int num_started = 0;
3516
3517         path = btrfs_alloc_path();
3518         if (!path)
3519                 return -ENOMEM;
3520
3521         /*
3522          * We don't need the lock here since we are protected by the transaction
3523          * commit.  We want to do the cache_save_setup first and then run the
3524          * delayed refs to make sure we have the best chance at doing this all
3525          * in one shot.
3526          */
3527         while (!list_empty(&cur_trans->dirty_bgs)) {
3528                 cache = list_first_entry(&cur_trans->dirty_bgs,
3529                                          struct btrfs_block_group_cache,
3530                                          dirty_list);
3531
3532                 /*
3533                  * this can happen if cache_save_setup re-dirties a block
3534                  * group that is already under IO.  Just wait for it to
3535                  * finish and then do it all again
3536                  */
3537                 if (!list_empty(&cache->io_list)) {
3538                         list_del_init(&cache->io_list);
3539                         btrfs_wait_cache_io(root, trans, cache,
3540                                             &cache->io_ctl, path,
3541                                             cache->key.objectid);
3542                         btrfs_put_block_group(cache);
3543                 }
3544
3545                 /*
3546                  * don't remove from the dirty list until after we've waited
3547                  * on any pending IO
3548                  */
3549                 list_del_init(&cache->dirty_list);
3550                 should_put = 1;
3551
3552                 cache_save_setup(cache, trans, path);
3553
3554                 if (!ret)
3555                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3556
3557                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3558                         cache->io_ctl.inode = NULL;
3559                         ret = btrfs_write_out_cache(root, trans, cache, path);
3560                         if (ret == 0 && cache->io_ctl.inode) {
3561                                 num_started++;
3562                                 should_put = 0;
3563                                 list_add_tail(&cache->io_list, io);
3564                         } else {
3565                                 /*
3566                                  * if we failed to write the cache, the
3567                                  * generation will be bad and life goes on
3568                                  */
3569                                 ret = 0;
3570                         }
3571                 }
3572                 if (!ret) {
3573                         ret = write_one_cache_group(trans, root, path, cache);
3574                         if (ret)
3575                                 btrfs_abort_transaction(trans, root, ret);
3576                 }
3577
3578                 /* if it's not on the io list, we need to put the block group */
3579                 if (should_put)
3580                         btrfs_put_block_group(cache);
3581         }
3582
3583         while (!list_empty(io)) {
3584                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3585                                          io_list);
3586                 list_del_init(&cache->io_list);
3587                 btrfs_wait_cache_io(root, trans, cache,
3588                                     &cache->io_ctl, path, cache->key.objectid);
3589                 btrfs_put_block_group(cache);
3590         }
3591
3592         btrfs_free_path(path);
3593         return ret;
3594 }
3595
3596 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3597 {
3598         struct btrfs_block_group_cache *block_group;
3599         int readonly = 0;
3600
3601         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3602         if (!block_group || block_group->ro)
3603                 readonly = 1;
3604         if (block_group)
3605                 btrfs_put_block_group(block_group);
3606         return readonly;
3607 }
3608
3609 static const char *alloc_name(u64 flags)
3610 {
3611         switch (flags) {
3612         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3613                 return "mixed";
3614         case BTRFS_BLOCK_GROUP_METADATA:
3615                 return "metadata";
3616         case BTRFS_BLOCK_GROUP_DATA:
3617                 return "data";
3618         case BTRFS_BLOCK_GROUP_SYSTEM:
3619                 return "system";
3620         default:
3621                 WARN_ON(1);
3622                 return "invalid-combination";
3623         }
3624 }
3625
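/*
 * Find the space_info for @flags, or create it if this is the first block
 * group of that type, and account @total_bytes/@bytes_used to it.  Mirrored
 * profiles (DUP/RAID1/RAID10) consume twice the raw disk space, hence the
 * factor of 2 applied to the disk_* counters.
 */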
3626 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3627                              u64 total_bytes, u64 bytes_used,
3628                              struct btrfs_space_info **space_info)
3629 {
3630         struct btrfs_space_info *found;
3631         int i;
3632         int factor;
3633         int ret;
3634
3635         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3636                      BTRFS_BLOCK_GROUP_RAID10))
3637                 factor = 2;
3638         else
3639                 factor = 1;
3640
3641         found = __find_space_info(info, flags);
3642         if (found) {
3643                 spin_lock(&found->lock);
3644                 found->total_bytes += total_bytes;
3645                 found->disk_total += total_bytes * factor;
3646                 found->bytes_used += bytes_used;
3647                 found->disk_used += bytes_used * factor;
3648                 if (total_bytes > 0)
3649                         found->full = 0;
3650                 spin_unlock(&found->lock);
3651                 *space_info = found;
3652                 return 0;
3653         }
3654         found = kzalloc(sizeof(*found), GFP_NOFS);
3655         if (!found)
3656                 return -ENOMEM;
3657
3658         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3659         if (ret) {
3660                 kfree(found);
3661                 return ret;
3662         }
3663
3664         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3665                 INIT_LIST_HEAD(&found->block_groups[i]);
3666         init_rwsem(&found->groups_sem);
3667         spin_lock_init(&found->lock);
3668         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3669         found->total_bytes = total_bytes;
3670         found->disk_total = total_bytes * factor;
3671         found->bytes_used = bytes_used;
3672         found->disk_used = bytes_used * factor;
3673         found->bytes_pinned = 0;
3674         found->bytes_reserved = 0;
3675         found->bytes_readonly = 0;
3676         found->bytes_may_use = 0;
3677         if (total_bytes > 0)
3678                 found->full = 0;
3679         else
3680                 found->full = 1;
3681         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3682         found->chunk_alloc = 0;
3683         found->flush = 0;
3684         init_waitqueue_head(&found->wait);
3685         INIT_LIST_HEAD(&found->ro_bgs);
3686
3687         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3688                                     info->space_info_kobj, "%s",
3689                                     alloc_name(found->flags));
3690         if (ret) {
3691                 kfree(found);
3692                 return ret;
3693         }
3694
3695         *space_info = found;
3696         list_add_rcu(&found->list, &info->space_info);
3697         if (flags & BTRFS_BLOCK_GROUP_DATA)
3698                 info->data_sinfo = found;
3699
3700         return ret;
3701 }
3702
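/*
 * Record, under the profiles seqlock, that chunks with this profile now
 * exist, so the avail_*_alloc_bits fields reflect every profile in use.
 */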
3703 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3704 {
3705         u64 extra_flags = chunk_to_extended(flags) &
3706                                 BTRFS_EXTENDED_PROFILE_MASK;
3707
3708         write_seqlock(&fs_info->profiles_lock);
3709         if (flags & BTRFS_BLOCK_GROUP_DATA)
3710                 fs_info->avail_data_alloc_bits |= extra_flags;
3711         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3712                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3713         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3714                 fs_info->avail_system_alloc_bits |= extra_flags;
3715         write_sequnlock(&fs_info->profiles_lock);
3716 }
3717
3718 /*
3719  * returns target flags in extended format or 0 if restripe for this
3720  * chunk_type is not in progress
3721  *
3722  * should be called with either volume_mutex or balance_lock held
3723  */
3724 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3725 {
3726         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3727         u64 target = 0;
3728
3729         if (!bctl)
3730                 return 0;
3731
3732         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3733             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3734                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3735         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3736                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3737                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3738         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3739                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3740                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3741         }
3742
3743         return target;
3744 }
3745
3746 /*
3747  * @flags: available profiles in extended format (see ctree.h)
3748  *
3749  * Returns reduced profile in chunk format.  If profile changing is in
3750  * progress (either running or paused) picks the target profile (if it's
3751  * already available), otherwise falls back to plain reducing.
3752  */
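/*
 * For example, flags containing both RAID1 and RAID0 reduce to RAID1, since
 * the fallback order below prefers RAID6 > RAID5 > RAID10 > RAID1 > RAID0.
 */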
3753 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3754 {
3755         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3756         u64 target;
3757         u64 tmp;
3758
3759         /*
3760          * see if restripe for this chunk_type is in progress, if so
3761          * try to reduce to the target profile
3762          */
3763         spin_lock(&root->fs_info->balance_lock);
3764         target = get_restripe_target(root->fs_info, flags);
3765         if (target) {
3766                 /* pick target profile only if it's already available */
3767                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3768                         spin_unlock(&root->fs_info->balance_lock);
3769                         return extended_to_chunk(target);
3770                 }
3771         }
3772         spin_unlock(&root->fs_info->balance_lock);
3773
3774         /* First, mask out the RAID levels which aren't possible */
3775         if (num_devices == 1)
3776                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3777                            BTRFS_BLOCK_GROUP_RAID5);
3778         if (num_devices < 3)
3779                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3780         if (num_devices < 4)
3781                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3782
3783         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3784                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3785                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3786         flags &= ~tmp;
3787
3788         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3789                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3790         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3791                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3792         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3793                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3794         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3795                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3796         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3797                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3798
3799         return extended_to_chunk(flags | tmp);
3800 }
3801
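/*
 * Fold the currently available allocation bits for this class of chunk into
 * @orig_flags, retrying under the profiles seqlock, then reduce the result
 * to a single usable profile.
 */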
3802 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3803 {
3804         unsigned seq;
3805         u64 flags;
3806
3807         do {
3808                 flags = orig_flags;
3809                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3810
3811                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3812                         flags |= root->fs_info->avail_data_alloc_bits;
3813                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3814                         flags |= root->fs_info->avail_system_alloc_bits;
3815                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3816                         flags |= root->fs_info->avail_metadata_alloc_bits;
3817         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3818
3819         return btrfs_reduce_alloc_profile(root, flags);
3820 }
3821
3822 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3823 {
3824         u64 flags;
3825         u64 ret;
3826
3827         if (data)
3828                 flags = BTRFS_BLOCK_GROUP_DATA;
3829         else if (root == root->fs_info->chunk_root)
3830                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3831         else
3832                 flags = BTRFS_BLOCK_GROUP_METADATA;
3833
3834         ret = get_alloc_profile(root, flags);
3835         return ret;
3836 }
3837
3838 /*
3839  * This will check the space info that the inode allocates from to make sure
3840  * we have enough space for @bytes.
3841  */
3842 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3843 {
3844         struct btrfs_space_info *data_sinfo;
3845         struct btrfs_root *root = BTRFS_I(inode)->root;
3846         struct btrfs_fs_info *fs_info = root->fs_info;
3847         u64 used;
3848         int ret = 0;
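        /* allow up to two transaction commits before returning ENOSPC */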
3849         int need_commit = 2;
3850         int have_pinned_space;
3851
3852         /* make sure bytes are sectorsize aligned */
3853         bytes = ALIGN(bytes, root->sectorsize);
3854
3855         if (btrfs_is_free_space_inode(inode)) {
3856                 need_commit = 0;
3857                 ASSERT(current->journal_info);
3858         }
3859
3860         data_sinfo = fs_info->data_sinfo;
3861         if (!data_sinfo)
3862                 goto alloc;
3863
3864 again:
3865         /* make sure we have enough space to handle the data first */
3866         spin_lock(&data_sinfo->lock);
3867         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3868                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3869                 data_sinfo->bytes_may_use;
3870
3871         if (used + bytes > data_sinfo->total_bytes) {
3872                 struct btrfs_trans_handle *trans;
3873
3874                 /*
3875                  * if we don't have enough free bytes in this space then we need
3876                  * to alloc a new chunk.
3877                  */
3878                 if (!data_sinfo->full) {
3879                         u64 alloc_target;
3880
3881                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3882                         spin_unlock(&data_sinfo->lock);
3883 alloc:
3884                         alloc_target = btrfs_get_alloc_profile(root, 1);
3885                         /*
3886                          * It is ugly that we don't call nolock join
3887                          * transaction for the free space inode case here.
3888                          * But it is safe because we only do the data space
3889                          * reservation for the free space cache in the
3890                          * transaction context; the common join transaction
3891                          * just increases the use count of the current
3892                          * transaction handle and doesn't try to acquire the
3893                          * trans_lock of the fs.
3894                          */
3895                         trans = btrfs_join_transaction(root);
3896                         if (IS_ERR(trans))
3897                                 return PTR_ERR(trans);
3898
3899                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3900                                              alloc_target,
3901                                              CHUNK_ALLOC_NO_FORCE);
3902                         btrfs_end_transaction(trans, root);
3903                         if (ret < 0) {
3904                                 if (ret != -ENOSPC)
3905                                         return ret;
3906                                 else {
3907                                         have_pinned_space = 1;
3908                                         goto commit_trans;
3909                                 }
3910                         }
3911
3912                         if (!data_sinfo)
3913                                 data_sinfo = fs_info->data_sinfo;
3914
3915                         goto again;
3916                 }
3917
3918                 /*
3919                  * If we don't have enough pinned space to deal with this
3920                  * allocation, and no removed chunk in current transaction,
3921                  * don't bother committing the transaction.
3922                  */
3923                 have_pinned_space = percpu_counter_compare(
3924                         &data_sinfo->total_bytes_pinned,
3925                         used + bytes - data_sinfo->total_bytes);
3926                 spin_unlock(&data_sinfo->lock);
3927
3928                 /* commit the current transaction and try again */
3929 commit_trans:
3930                 if (need_commit &&
3931                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3932                         need_commit--;
3933
3934                         if (need_commit > 0)
3935                                 btrfs_wait_ordered_roots(fs_info, -1);
3936
3937                         trans = btrfs_join_transaction(root);
3938                         if (IS_ERR(trans))
3939                                 return PTR_ERR(trans);
3940                         if (have_pinned_space >= 0 ||
3941                             trans->transaction->have_free_bgs ||
3942                             need_commit > 0) {
3943                                 ret = btrfs_commit_transaction(trans, root);
3944                                 if (ret)
3945                                         return ret;
3946                                 /*
3947                                  * make sure that all running delayed iputs are
3948                                  * done
3949                                  */
3950                                 down_write(&root->fs_info->delayed_iput_sem);
3951                                 up_write(&root->fs_info->delayed_iput_sem);
3952                                 goto again;
3953                         } else {
3954                                 btrfs_end_transaction(trans, root);
3955                         }
3956                 }
3957
3958                 trace_btrfs_space_reservation(root->fs_info,
3959                                               "space_info:enospc",
3960                                               data_sinfo->flags, bytes, 1);
3961                 return -ENOSPC;
3962         }
3963         ret = btrfs_qgroup_reserve(root, write_bytes);
3964         if (ret)
3965                 goto out;
3966         data_sinfo->bytes_may_use += bytes;
3967         trace_btrfs_space_reservation(root->fs_info, "space_info",
3968                                       data_sinfo->flags, bytes, 1);
3969 out:
3970         spin_unlock(&data_sinfo->lock);
3971
3972         return ret;
3973 }
3974
3975 /*
3976  * Called if we need to clear a data reservation for this inode.
3977  */
3978 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3979 {
3980         struct btrfs_root *root = BTRFS_I(inode)->root;
3981         struct btrfs_space_info *data_sinfo;
3982
3983         /* make sure bytes are sectorsize aligned */
3984         bytes = ALIGN(bytes, root->sectorsize);
3985
3986         data_sinfo = root->fs_info->data_sinfo;
3987         spin_lock(&data_sinfo->lock);
3988         WARN_ON(data_sinfo->bytes_may_use < bytes);
3989         data_sinfo->bytes_may_use -= bytes;
3990         trace_btrfs_space_reservation(root->fs_info, "space_info",
3991                                       data_sinfo->flags, bytes, 0);
3992         spin_unlock(&data_sinfo->lock);
3993 }
3994
3995 static void force_metadata_allocation(struct btrfs_fs_info *info)
3996 {
3997         struct list_head *head = &info->space_info;
3998         struct btrfs_space_info *found;
3999
4000         rcu_read_lock();
4001         list_for_each_entry_rcu(found, head, list) {
4002                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4003                         found->force_alloc = CHUNK_ALLOC_FORCE;
4004         }
4005         rcu_read_unlock();
4006 }
4007
4008 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4009 {
4010         return (global->size << 1);
4011 }
4012
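/*
 * Decide whether a new chunk should be allocated for @sinfo: always for
 * CHUNK_ALLOC_FORCE, for CHUNK_ALLOC_LIMITED once free space drops below
 * max(64M, 1% of the FS), and otherwise only once about 80% of the current
 * space is allocated.
 */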
4013 static int should_alloc_chunk(struct btrfs_root *root,
4014                               struct btrfs_space_info *sinfo, int force)
4015 {
4016         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4017         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4018         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4019         u64 thresh;
4020
4021         if (force == CHUNK_ALLOC_FORCE)
4022                 return 1;
4023
4024         /*
4025          * We need to take into account the global rsv because for all intents
4026          * and purposes it's used space.  Don't worry about locking the
4027          * global_rsv, it doesn't change except when the transaction commits.
4028          */
4029         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4030                 num_allocated += calc_global_rsv_need_space(global_rsv);
4031
4032         /*
4033          * in limited mode, we want to have some free space up to
4034          * about 1% of the FS size.
4035          */
4036         if (force == CHUNK_ALLOC_LIMITED) {
4037                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4038                 thresh = max_t(u64, 64 * 1024 * 1024,
4039                                div_factor_fine(thresh, 1));
4040
4041                 if (num_bytes - num_allocated < thresh)
4042                         return 1;
4043         }
4044
4045         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4046                 return 0;
4047         return 1;
4048 }
4049
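/*
 * Return how many devices a chunk of the given profile type touches: all
 * rw devices for the striped profiles (raid0/10/5/6), two for raid1, one
 * for dup or single.
 */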
4050 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4051 {
4052         u64 num_dev;
4053
4054         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4055                     BTRFS_BLOCK_GROUP_RAID0 |
4056                     BTRFS_BLOCK_GROUP_RAID5 |
4057                     BTRFS_BLOCK_GROUP_RAID6))
4058                 num_dev = root->fs_info->fs_devices->rw_devices;
4059         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4060                 num_dev = 2;
4061         else
4062                 num_dev = 1;    /* DUP or single */
4063
4064         return num_dev;
4065 }
4066
4067 /*
4068  * Reserve space in the system space_info for updating the device items and
4069  * adding or removing a chunk item, as needed when allocating or removing a
4070  * chunk of the given @type.
4071  */
4072 void check_system_chunk(struct btrfs_trans_handle *trans,
4073                         struct btrfs_root *root,
4074                         u64 type)
4075 {
4076         struct btrfs_space_info *info;
4077         u64 left;
4078         u64 thresh;
4079         int ret = 0;
4080         u64 num_devs;
4081
4082         /*
4083          * Needed because we can end up allocating a system chunk, and we need
4084          * an atomic, race-free space reservation in the chunk block reserve.
4085          */
4086         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4087
4088         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4089         spin_lock(&info->lock);
4090         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4091                 info->bytes_reserved - info->bytes_readonly -
4092                 info->bytes_may_use;
4093         spin_unlock(&info->lock);
4094
4095         num_devs = get_profile_num_devs(root, type);
4096
4097         /* num_devs device items to update and 1 chunk item to add or remove */
4098         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4099                 btrfs_calc_trans_metadata_size(root, 1);
4100
4101         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4102                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4103                         left, thresh, type);
4104                 dump_space_info(info, 0, 0);
4105         }
4106
4107         if (left < thresh) {
4108                 u64 flags;
4109
4110                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4111                 /*
4112                  * Ignore failure to create system chunk. We might end up not
4113                  * needing it, as we might not need to COW all nodes/leafs from
4114                  * the paths we visit in the chunk tree (they were already COWed
4115                  * or created in the current transaction for example).
4116                  */
4117                 ret = btrfs_alloc_chunk(trans, root, flags);
4118         }
4119
4120         if (!ret) {
4121                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4122                                           &root->fs_info->chunk_block_rsv,
4123                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4124                 if (!ret)
4125                         trans->chunk_bytes_reserved += thresh;
4126         }
4127 }
4128
4129 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4130                           struct btrfs_root *extent_root, u64 flags, int force)
4131 {
4132         struct btrfs_space_info *space_info;
4133         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4134         int wait_for_alloc = 0;
4135         int ret = 0;
4136
4137         /* Don't re-enter if we're already allocating a chunk */
4138         if (trans->allocating_chunk)
4139                 return -ENOSPC;
4140
4141         space_info = __find_space_info(extent_root->fs_info, flags);
4142         if (!space_info) {
4143                 ret = update_space_info(extent_root->fs_info, flags,
4144                                         0, 0, &space_info);
4145                 BUG_ON(ret); /* -ENOMEM */
4146         }
4147         BUG_ON(!space_info); /* Logic error */
4148
4149 again:
4150         spin_lock(&space_info->lock);
4151         if (force < space_info->force_alloc)
4152                 force = space_info->force_alloc;
4153         if (space_info->full) {
4154                 if (should_alloc_chunk(extent_root, space_info, force))
4155                         ret = -ENOSPC;
4156                 else
4157                         ret = 0;
4158                 spin_unlock(&space_info->lock);
4159                 return ret;
4160         }
4161
4162         if (!should_alloc_chunk(extent_root, space_info, force)) {
4163                 spin_unlock(&space_info->lock);
4164                 return 0;
4165         } else if (space_info->chunk_alloc) {
4166                 wait_for_alloc = 1;
4167         } else {
4168                 space_info->chunk_alloc = 1;
4169         }
4170
4171         spin_unlock(&space_info->lock);
4172
4173         mutex_lock(&fs_info->chunk_mutex);
4174
4175         /*
4176          * The chunk_mutex is held throughout the entirety of a chunk
4177          * allocation, so once we've acquired the chunk_mutex we know that the
4178          * other guy is done and we need to recheck and see if we should
4179          * allocate.
4180          */
4181         if (wait_for_alloc) {
4182                 mutex_unlock(&fs_info->chunk_mutex);
4183                 wait_for_alloc = 0;
4184                 goto again;
4185         }
4186
4187         trans->allocating_chunk = true;
4188
4189         /*
4190          * If we have mixed data/metadata chunks we want to make sure we keep
4191          * allocating mixed chunks instead of individual chunks.
4192          */
4193         if (btrfs_mixed_space_info(space_info))
4194                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4195
4196         /*
4197          * if we're doing a data chunk, go ahead and make sure that
4198          * we keep a reasonable number of metadata chunks allocated in the
4199          * FS as well.
4200          */
4201         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4202                 fs_info->data_chunk_allocations++;
4203                 if (!(fs_info->data_chunk_allocations %
4204                       fs_info->metadata_ratio))
4205                         force_metadata_allocation(fs_info);
4206         }
4207
4208         /*
4209          * Check if we have enough space in SYSTEM chunk because we may need
4210          * to update devices.
4211          */
4212         check_system_chunk(trans, extent_root, flags);
4213
4214         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4215         trans->allocating_chunk = false;
4216
4217         spin_lock(&space_info->lock);
4218         if (ret < 0 && ret != -ENOSPC)
4219                 goto out;
4220         if (ret)
4221                 space_info->full = 1;
4222         else
4223                 ret = 1;
4224
4225         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4226 out:
4227         space_info->chunk_alloc = 0;
4228         spin_unlock(&space_info->lock);
4229         mutex_unlock(&fs_info->chunk_mutex);
4230         /*
4231          * When we allocate a new chunk we reserve space in the chunk block
4232          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4233          * add new nodes/leafs to it if we end up needing to do it when
4234          * inserting the chunk item and updating device items as part of the
4235          * second phase of chunk allocation, performed by
4236          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4237          * large number of new block groups to create in our transaction
4238          * handle's new_bgs list to avoid exhausting the chunk block reserve
4239          * in extreme cases - like having a single transaction create many new
4240          * block groups when starting to write out the free space caches of all
4241          * the block groups that were made dirty during the lifetime of the
4242          * transaction.
4243          */
4244         if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4245                 btrfs_create_pending_block_groups(trans, trans->root);
4246                 btrfs_trans_release_chunk_metadata(trans);
4247         }
4248         return ret;
4249 }
4250
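/*
 * Decide whether we may overcommit @space_info by @bytes: compare what is
 * already used (plus headroom for the global reserve) against total_bytes
 * plus a fraction of the unallocated device space, scaled by how
 * aggressively @flush lets us flush.
 */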
4251 static int can_overcommit(struct btrfs_root *root,
4252                           struct btrfs_space_info *space_info, u64 bytes,
4253                           enum btrfs_reserve_flush_enum flush)
4254 {
4255         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4256         u64 profile = btrfs_get_alloc_profile(root, 0);
4257         u64 space_size;
4258         u64 avail;
4259         u64 used;
4260
4261         used = space_info->bytes_used + space_info->bytes_reserved +
4262                 space_info->bytes_pinned + space_info->bytes_readonly;
4263
4264         /*
4265          * We only want to allow over committing if we have lots of actual space
4266          * free, but if we don't have enough space to handle the global reserve
4267          * space then we could end up having a real enospc problem when trying
4268          * to allocate a chunk or some other such important allocation.
4269          */
4270         spin_lock(&global_rsv->lock);
4271         space_size = calc_global_rsv_need_space(global_rsv);
4272         spin_unlock(&global_rsv->lock);
4273         if (used + space_size >= space_info->total_bytes)
4274                 return 0;
4275
4276         used += space_info->bytes_may_use;
4277
4278         spin_lock(&root->fs_info->free_chunk_lock);
4279         avail = root->fs_info->free_chunk_space;
4280         spin_unlock(&root->fs_info->free_chunk_lock);
4281
4282         /*
4283          * If we have dup, raid1 or raid10 then only half of the free
4284          * space is actually usable.  For raid56, the space info used
4285          * doesn't include the parity drive, so we don't have to
4286          * change the math.
4287          */
4288         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4289                        BTRFS_BLOCK_GROUP_RAID1 |
4290                        BTRFS_BLOCK_GROUP_RAID10))
4291                 avail >>= 1;
4292
4293         /*
4294          * If we aren't allowed to flush all things, let us overcommit up to
4295          * half of the free space. If we can flush everything, be more
4296          * conservative and only overcommit up to 1/8 of the space.
4297          */
4298         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4299                 avail >>= 3;
4300         else
4301                 avail >>= 1;
4302
4303         if (used + bytes < space_info->total_bytes + avail)
4304                 return 1;
4305         return 0;
4306 }
4307
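/*
 * Kick writeback for delalloc pages: via the superblock when we can take
 * ->s_umount, otherwise by starting delalloc work on the btrfs roots
 * directly.
 */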
4308 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4309                                          unsigned long nr_pages, int nr_items)
4310 {
4311         struct super_block *sb = root->fs_info->sb;
4312
4313         if (down_read_trylock(&sb->s_umount)) {
4314                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4315                 up_read(&sb->s_umount);
4316         } else {
4317                 /*
4318                  * We needn't worry about the filesystem going from r/w to r/o
4319                  * even though we don't acquire ->s_umount, because the
4320                  * filesystem guarantees that the delalloc inode list is empty
4321                  * once it becomes read-only (all dirty pages have been written
4322                  * to disk).
4323                  */
4324                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4325                 if (!current->journal_info)
4326                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4327         }
4328 }
4329
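/*
 * Convert a byte count to reclaim into a number of metadata items, based
 * on the per-item transaction metadata size; always at least one item.
 */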
4330 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4331 {
4332         u64 bytes;
4333         int nr;
4334
4335         bytes = btrfs_calc_trans_metadata_size(root, 1);
4336         nr = (int)div64_u64(to_reclaim, bytes);
4337         if (!nr)
4338                 nr = 1;
4339         return nr;
4340 }
4341
4342 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4343
4344 /*
4345  * shrink metadata reservation for delalloc
4346  */
4347 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4348                             bool wait_ordered)
4349 {
4350         struct btrfs_block_rsv *block_rsv;
4351         struct btrfs_space_info *space_info;
4352         struct btrfs_trans_handle *trans;
4353         u64 delalloc_bytes;
4354         u64 max_reclaim;
4355         long time_left;
4356         unsigned long nr_pages;
4357         int loops;
4358         int items;
4359         enum btrfs_reserve_flush_enum flush;
4360
4361         /* Calculate how many items we need to flush for this space reservation */
4362         items = calc_reclaim_items_nr(root, to_reclaim);
4363         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4364
4365         trans = (struct btrfs_trans_handle *)current->journal_info;
4366         block_rsv = &root->fs_info->delalloc_block_rsv;
4367         space_info = block_rsv->space_info;
4368
4369         delalloc_bytes = percpu_counter_sum_positive(
4370                                                 &root->fs_info->delalloc_bytes);
4371         if (delalloc_bytes == 0) {
4372                 if (trans)
4373                         return;
4374                 if (wait_ordered)
4375                         btrfs_wait_ordered_roots(root->fs_info, items);
4376                 return;
4377         }
4378
4379         loops = 0;
4380         while (delalloc_bytes && loops < 3) {
4381                 max_reclaim = min(delalloc_bytes, to_reclaim);
4382                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4383                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4384                 /*
4385                  * We need to wait for the async pages to actually start before
4386                  * we do anything.
4387                  */
4388                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4389                 if (!max_reclaim)
4390                         goto skip_async;
4391
4392                 if (max_reclaim <= nr_pages)
4393                         max_reclaim = 0;
4394                 else
4395                         max_reclaim -= nr_pages;
4396
4397                 wait_event(root->fs_info->async_submit_wait,
4398                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4399                            (int)max_reclaim);
4400 skip_async:
4401                 if (!trans)
4402                         flush = BTRFS_RESERVE_FLUSH_ALL;
4403                 else
4404                         flush = BTRFS_RESERVE_NO_FLUSH;
4405                 spin_lock(&space_info->lock);
4406                 if (can_overcommit(root, space_info, orig, flush)) {
4407                         spin_unlock(&space_info->lock);
4408                         break;
4409                 }
4410                 spin_unlock(&space_info->lock);
4411
4412                 loops++;
4413                 if (wait_ordered && !trans) {
4414                         btrfs_wait_ordered_roots(root->fs_info, items);
4415                 } else {
4416                         time_left = schedule_timeout_killable(1);
4417                         if (time_left)
4418                                 break;
4419                 }
4420                 delalloc_bytes = percpu_counter_sum_positive(
4421                                                 &root->fs_info->delalloc_bytes);
4422         }
4423 }
4424
4425 /**
4426  * may_commit_transaction - possibly commit the transaction if it's ok to
4427  * @root - the root we're allocating for
4428  * @space_info - the space_info we're trying to reserve from
4429  * @bytes - the number of bytes we want to reserve
4430  * @force - force the commit
4431  *
4432  * This will check whether committing the transaction will actually get us
4433  * somewhere and then commit it if so.  Otherwise it returns -ENOSPC.
4434  */
4435 static int may_commit_transaction(struct btrfs_root *root,
4436                                   struct btrfs_space_info *space_info,
4437                                   u64 bytes, int force)
4438 {
4439         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4440         struct btrfs_trans_handle *trans;
4441
4442         trans = (struct btrfs_trans_handle *)current->journal_info;
4443         if (trans)
4444                 return -EAGAIN;
4445
4446         if (force)
4447                 goto commit;
4448
4449         /* See if there is enough pinned space to make this reservation */
4450         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4451                                    bytes) >= 0)
4452                 goto commit;
4453
4454         /*
4455          * See if there is some space in the delayed insertion reserve for this
4456          * reservation; committing frees the delayed rsv, so commit only if the
4457          * pinned bytes plus that reserve cover what we need.
4458          */
4458         if (space_info != delayed_rsv->space_info)
4459                 return -ENOSPC;
4460
4461         spin_lock(&delayed_rsv->lock);
4462         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4463                                    bytes - delayed_rsv->size) < 0) {
4464                 spin_unlock(&delayed_rsv->lock);
4465                 return -ENOSPC;
4466         }
4467         spin_unlock(&delayed_rsv->lock);
4468
4469 commit:
4470         trans = btrfs_join_transaction(root);
4471         if (IS_ERR(trans))
4472                 return -ENOSPC;
4473
4474         return btrfs_commit_transaction(trans, root);
4475 }
4476
4477 enum flush_state {
4478         FLUSH_DELAYED_ITEMS_NR  =       1,
4479         FLUSH_DELAYED_ITEMS     =       2,
4480         FLUSH_DELALLOC          =       3,
4481         FLUSH_DELALLOC_WAIT     =       4,
4482         ALLOC_CHUNK             =       5,
4483         COMMIT_TRANS            =       6,
4484 };
4485
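/*
 * Run a single reclaim step against @space_info, selected by @state:
 * flush delayed items, flush (and optionally wait on) delalloc, allocate
 * a new chunk, or commit the transaction.
 */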
4486 static int flush_space(struct btrfs_root *root,
4487                        struct btrfs_space_info *space_info, u64 num_bytes,
4488                        u64 orig_bytes, int state)
4489 {
4490         struct btrfs_trans_handle *trans;
4491         int nr;
4492         int ret = 0;
4493
4494         switch (state) {
4495         case FLUSH_DELAYED_ITEMS_NR:
4496         case FLUSH_DELAYED_ITEMS:
4497                 if (state == FLUSH_DELAYED_ITEMS_NR)
4498                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4499                 else
4500                         nr = -1;
4501
4502                 trans = btrfs_join_transaction(root);
4503                 if (IS_ERR(trans)) {
4504                         ret = PTR_ERR(trans);
4505                         break;
4506                 }
4507                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4508                 btrfs_end_transaction(trans, root);
4509                 break;
4510         case FLUSH_DELALLOC:
4511         case FLUSH_DELALLOC_WAIT:
4512                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4513                                 state == FLUSH_DELALLOC_WAIT);
4514                 break;
4515         case ALLOC_CHUNK:
4516                 trans = btrfs_join_transaction(root);
4517                 if (IS_ERR(trans)) {
4518                         ret = PTR_ERR(trans);
4519                         break;
4520                 }
4521                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4522                                      btrfs_get_alloc_profile(root, 0),
4523                                      CHUNK_ALLOC_NO_FORCE);
4524                 btrfs_end_transaction(trans, root);
4525                 if (ret == -ENOSPC)
4526                         ret = 0;
4527                 break;
4528         case COMMIT_TRANS:
4529                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4530                 break;
4531         default:
4532                 ret = -ENOSPC;
4533                 break;
4534         }
4535
4536         return ret;
4537 }
4538
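/*
 * Work out how much metadata space the async reclaim worker should try to
 * free: zero if we can still overcommit, otherwise the amount used beyond
 * roughly 90-95% of the space, capped at what is actually reclaimable.
 */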
4539 static inline u64
4540 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4541                                  struct btrfs_space_info *space_info)
4542 {
4543         u64 used;
4544         u64 expected;
4545         u64 to_reclaim;
4546
4547         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4548                                 16 * 1024 * 1024);
4549         spin_lock(&space_info->lock);
4550         if (can_overcommit(root, space_info, to_reclaim,
4551                            BTRFS_RESERVE_FLUSH_ALL)) {
4552                 to_reclaim = 0;
4553                 goto out;
4554         }
4555
4556         used = space_info->bytes_used + space_info->bytes_reserved +
4557                space_info->bytes_pinned + space_info->bytes_readonly +
4558                space_info->bytes_may_use;
4559         if (can_overcommit(root, space_info, 1024 * 1024,
4560                            BTRFS_RESERVE_FLUSH_ALL))
4561                 expected = div_factor_fine(space_info->total_bytes, 95);
4562         else
4563                 expected = div_factor_fine(space_info->total_bytes, 90);
4564
4565         if (used > expected)
4566                 to_reclaim = used - expected;
4567         else
4568                 to_reclaim = 0;
4569         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4570                                      space_info->bytes_reserved);
4571 out:
4572         spin_unlock(&space_info->lock);
4573
4574         return to_reclaim;
4575 }
4576
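/*
 * Background reclaim is only worthwhile once overall usage crosses ~98%
 * of the space while bytes_used alone has not (if we are plain full,
 * reclaim would only slow us down).
 */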
4577 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4578                                         struct btrfs_fs_info *fs_info, u64 used)
4579 {
4580         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4581
4582         /* If we're just plain full then async reclaim just slows us down. */
4583         if (space_info->bytes_used >= thresh)
4584                 return 0;
4585
4586         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4587                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4588 }
4589
4590 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4591                                        struct btrfs_fs_info *fs_info,
4592                                        int flush_state)
4593 {
4594         u64 used;
4595
4596         spin_lock(&space_info->lock);
4597         /*
4598          * We've run out of space and haven't gained any free space via
4599          * flush_space, so don't bother doing async reclaim.
4600          */
4601         if (flush_state > COMMIT_TRANS && space_info->full) {
4602                 spin_unlock(&space_info->lock);
4603                 return 0;
4604         }
4605
4606         used = space_info->bytes_used + space_info->bytes_reserved +
4607                space_info->bytes_pinned + space_info->bytes_readonly +
4608                space_info->bytes_may_use;
4609         if (need_do_async_reclaim(space_info, fs_info, used)) {
4610                 spin_unlock(&space_info->lock);
4611                 return 1;
4612         }
4613         spin_unlock(&space_info->lock);
4614
4615         return 0;
4616 }
4617
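/*
 * Worker for background metadata reclaim: repeatedly run flush_space with
 * progressively heavier flush states until enough space is freed or the
 * states are exhausted.
 */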
4618 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4619 {
4620         struct btrfs_fs_info *fs_info;
4621         struct btrfs_space_info *space_info;
4622         u64 to_reclaim;
4623         int flush_state;
4624
4625         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4626         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4627
4628         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4629                                                       space_info);
4630         if (!to_reclaim)
4631                 return;
4632
4633         flush_state = FLUSH_DELAYED_ITEMS_NR;
4634         do {
4635                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4636                             to_reclaim, flush_state);
4637                 flush_state++;
4638                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4639                                                  flush_state))
4640                         return;
4641         } while (flush_state < COMMIT_TRANS);
4642 }
4643
4644 void btrfs_init_async_reclaim_work(struct work_struct *work)
4645 {
4646         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4647 }
4648
4649 /**
4650  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4651  * @root - the root we're allocating for
4652  * @block_rsv - the block_rsv we're allocating for
4653  * @orig_bytes - the number of bytes we want
4654  * @flush - whether or not we can flush to make our reservation
4655  *
4656  * This will reserve orig_bytes number of bytes from the space info associated
4657  * with the block_rsv.  If there is not enough space it will make an attempt to
4658  * flush out space to make room.  It will do this by flushing delalloc if
4659  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4660  * then no attempt to regain reservations will be made and this will fail if
4661  * there is not enough space already.
4662  */
4663 static int reserve_metadata_bytes(struct btrfs_root *root,
4664                                   struct btrfs_block_rsv *block_rsv,
4665                                   u64 orig_bytes,
4666                                   enum btrfs_reserve_flush_enum flush)
4667 {
4668         struct btrfs_space_info *space_info = block_rsv->space_info;
4669         u64 used;
4670         u64 num_bytes = orig_bytes;
4671         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4672         int ret = 0;
4673         bool flushing = false;
4674
4675 again:
4676         ret = 0;
4677         spin_lock(&space_info->lock);
4678         /*
4679          * We only want to wait if somebody other than us is flushing and we
4680          * are actually allowed to flush all things.
4681          */
4682         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4683                space_info->flush) {
4684                 spin_unlock(&space_info->lock);
4685                 /*
4686                  * If we have a trans handle we can't wait because the flusher
4687                  * may have to commit the transaction, which would mean we would
4688                  * deadlock since we are waiting for the flusher to finish, but
4689                  * hold the current transaction open.
4690                  */
4691                 if (current->journal_info)
4692                         return -EAGAIN;
4693                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4694                 /* Must have been killed, return */
4695                 if (ret)
4696                         return -EINTR;
4697
4698                 spin_lock(&space_info->lock);
4699         }
4700
4701         ret = -ENOSPC;
4702         used = space_info->bytes_used + space_info->bytes_reserved +
4703                 space_info->bytes_pinned + space_info->bytes_readonly +
4704                 space_info->bytes_may_use;
4705
4706         /*
4707          * The idea here is that if we've not already over-reserved the space
4708          * then we can go ahead and save our reservation first and then start
4709          * flushing if we need to.  Otherwise, if we've already overcommitted,
4710          * let's start flushing stuff first and then come back and try to make
4711          * our reservation.
4712          */
4713         if (used <= space_info->total_bytes) {
4714                 if (used + orig_bytes <= space_info->total_bytes) {
4715                         space_info->bytes_may_use += orig_bytes;
4716                         trace_btrfs_space_reservation(root->fs_info,
4717                                 "space_info", space_info->flags, orig_bytes, 1);
4718                         ret = 0;
4719                 } else {
4720                         /*
4721                          * Ok, set num_bytes to orig_bytes since we aren't
4722                          * overcommitted; this way we only try and reclaim what
4723                          * we need.
4724                          */
4725                         num_bytes = orig_bytes;
4726                 }
4727         } else {
4728                 /*
4729                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4730                  * amount plus twice the amount of bytes that we need for this
4731                  * reservation.
4732                  */
4733                 num_bytes = used - space_info->total_bytes +
4734                         (orig_bytes * 2);
4735         }
4736
4737         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4738                 space_info->bytes_may_use += orig_bytes;
4739                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4740                                               space_info->flags, orig_bytes,
4741                                               1);
4742                 ret = 0;
4743         }
4744
4745         /*
4746          * Couldn't make our reservation, save our place so while we're trying
4747          * to reclaim space we can actually use it instead of somebody else
4748          * stealing it from us.
4749          *
4750          * We make the other tasks wait for the flush only when we can flush
4751          * all things.
4752          */
4753         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4754                 flushing = true;
4755                 space_info->flush = 1;
4756         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4757                 used += orig_bytes;
4758                 /*
4759                  * We will do the space reservation dance during log replay,
4760                  * which means we won't have fs_info->fs_root set, so don't do
4761                  * the async reclaim as we will panic.
4762                  */
4763                 if (!root->fs_info->log_root_recovering &&
4764                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4765                     !work_busy(&root->fs_info->async_reclaim_work))
4766                         queue_work(system_unbound_wq,
4767                                    &root->fs_info->async_reclaim_work);
4768         }
4769         spin_unlock(&space_info->lock);
4770
4771         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4772                 goto out;
4773
4774         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4775                           flush_state);
4776         flush_state++;
4777
4778         /*
4779          * If we are FLUSH_LIMIT, we can not flush delalloc or a deadlock
4780          * would happen, so skip the delalloc flush.
4781          */
4782         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4783             (flush_state == FLUSH_DELALLOC ||
4784              flush_state == FLUSH_DELALLOC_WAIT))
4785                 flush_state = ALLOC_CHUNK;
4786
4787         if (!ret)
4788                 goto again;
4789         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4790                  flush_state < COMMIT_TRANS)
4791                 goto again;
4792         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4793                  flush_state <= COMMIT_TRANS)
4794                 goto again;
4795
4796 out:
4797         if (ret == -ENOSPC &&
4798             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4799                 struct btrfs_block_rsv *global_rsv =
4800                         &root->fs_info->global_block_rsv;
4801
4802                 if (block_rsv != global_rsv &&
4803                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4804                         ret = 0;
4805         }
4806         if (ret == -ENOSPC)
4807                 trace_btrfs_space_reservation(root->fs_info,
4808                                               "space_info:enospc",
4809                                               space_info->flags, orig_bytes, 1);
4810         if (flushing) {
4811                 spin_lock(&space_info->lock);
4812                 space_info->flush = 0;
4813                 wake_up_all(&space_info->wait);
4814                 spin_unlock(&space_info->lock);
4815         }
4816         return ret;
4817 }
4818
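/*
 * Pick the block reserve to charge for a tree operation: the
 * transaction's rsv for reference counted roots and for csum/uuid tree
 * updates, otherwise the root's own rsv, falling back to the empty rsv.
 */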
4819 static struct btrfs_block_rsv *get_block_rsv(
4820                                         const struct btrfs_trans_handle *trans,
4821                                         const struct btrfs_root *root)
4822 {
4823         struct btrfs_block_rsv *block_rsv = NULL;
4824
4825         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4826                 block_rsv = trans->block_rsv;
4827
4828         if (root == root->fs_info->csum_root && trans->adding_csums)
4829                 block_rsv = trans->block_rsv;
4830
4831         if (root == root->fs_info->uuid_root)
4832                 block_rsv = trans->block_rsv;
4833
4834         if (!block_rsv)
4835                 block_rsv = root->block_rsv;
4836
4837         if (!block_rsv)
4838                 block_rsv = &root->fs_info->empty_block_rsv;
4839
4840         return block_rsv;
4841 }
4842
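/* Take num_bytes out of the reserve's reserved space, if available. */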
4843 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4844                                u64 num_bytes)
4845 {
4846         int ret = -ENOSPC;
4847         spin_lock(&block_rsv->lock);
4848         if (block_rsv->reserved >= num_bytes) {
4849                 block_rsv->reserved -= num_bytes;
4850                 if (block_rsv->reserved < block_rsv->size)
4851                         block_rsv->full = 0;
4852                 ret = 0;
4853         }
4854         spin_unlock(&block_rsv->lock);
4855         return ret;
4856 }
4857
4858 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4859                                 u64 num_bytes, int update_size)
4860 {
4861         spin_lock(&block_rsv->lock);
4862         block_rsv->reserved += num_bytes;
4863         if (update_size)
4864                 block_rsv->size += num_bytes;
4865         else if (block_rsv->reserved >= block_rsv->size)
4866                 block_rsv->full = 1;
4867         spin_unlock(&block_rsv->lock);
4868 }
4869
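/*
 * Move num_bytes from the global reserve into @dest, but only if doing so
 * still leaves the global reserve holding at least min_factor tenths of
 * its size.
 */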
4870 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4871                              struct btrfs_block_rsv *dest, u64 num_bytes,
4872                              int min_factor)
4873 {
4874         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4875         u64 min_bytes;
4876
4877         if (global_rsv->space_info != dest->space_info)
4878                 return -ENOSPC;
4879
4880         spin_lock(&global_rsv->lock);
4881         min_bytes = div_factor(global_rsv->size, min_factor);
4882         if (global_rsv->reserved < min_bytes + num_bytes) {
4883                 spin_unlock(&global_rsv->lock);
4884                 return -ENOSPC;
4885         }
4886         global_rsv->reserved -= num_bytes;
4887         if (global_rsv->reserved < global_rsv->size)
4888                 global_rsv->full = 0;
4889         spin_unlock(&global_rsv->lock);
4890
4891         block_rsv_add_bytes(dest, num_bytes, 1);
4892         return 0;
4893 }
4894
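/*
 * Shrink block_rsv by num_bytes and hand any excess reservation first to
 * @dest until it is full, returning whatever remains to the space_info's
 * bytes_may_use.
 */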
4895 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4896                                     struct btrfs_block_rsv *block_rsv,
4897                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4898 {
4899         struct btrfs_space_info *space_info = block_rsv->space_info;
4900
4901         spin_lock(&block_rsv->lock);
4902         if (num_bytes == (u64)-1)
4903                 num_bytes = block_rsv->size;
4904         block_rsv->size -= num_bytes;
4905         if (block_rsv->reserved >= block_rsv->size) {
4906                 num_bytes = block_rsv->reserved - block_rsv->size;
4907                 block_rsv->reserved = block_rsv->size;
4908                 block_rsv->full = 1;
4909         } else {
4910                 num_bytes = 0;
4911         }
4912         spin_unlock(&block_rsv->lock);
4913
4914         if (num_bytes > 0) {
4915                 if (dest) {
4916                         spin_lock(&dest->lock);
4917                         if (!dest->full) {
4918                                 u64 bytes_to_add;
4919
4920                                 bytes_to_add = dest->size - dest->reserved;
4921                                 bytes_to_add = min(num_bytes, bytes_to_add);
4922                                 dest->reserved += bytes_to_add;
4923                                 if (dest->reserved >= dest->size)
4924                                         dest->full = 1;
4925                                 num_bytes -= bytes_to_add;
4926                         }
4927                         spin_unlock(&dest->lock);
4928                 }
4929                 if (num_bytes) {
4930                         spin_lock(&space_info->lock);
4931                         space_info->bytes_may_use -= num_bytes;
4932                         trace_btrfs_space_reservation(fs_info, "space_info",
4933                                         space_info->flags, num_bytes, 0);
4934                         spin_unlock(&space_info->lock);
4935                 }
4936         }
4937 }
4938
4939 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4940                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4941 {
4942         int ret;
4943
4944         ret = block_rsv_use_bytes(src, num_bytes);
4945         if (ret)
4946                 return ret;
4947
4948         block_rsv_add_bytes(dst, num_bytes, 1);
4949         return 0;
4950 }
4951
4952 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4953 {
4954         memset(rsv, 0, sizeof(*rsv));
4955         spin_lock_init(&rsv->lock);
4956         rsv->type = type;
4957 }
4958
4959 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4960                                               unsigned short type)
4961 {
4962         struct btrfs_block_rsv *block_rsv;
4963         struct btrfs_fs_info *fs_info = root->fs_info;
4964
4965         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4966         if (!block_rsv)
4967                 return NULL;
4968
4969         btrfs_init_block_rsv(block_rsv, type);
4970         block_rsv->space_info = __find_space_info(fs_info,
4971                                                   BTRFS_BLOCK_GROUP_METADATA);
4972         return block_rsv;
4973 }
4974
4975 void btrfs_free_block_rsv(struct btrfs_root *root,
4976                           struct btrfs_block_rsv *rsv)
4977 {
4978         if (!rsv)
4979                 return;
4980         btrfs_block_rsv_release(root, rsv, (u64)-1);
4981         kfree(rsv);
4982 }
4983
4984 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
4985 {
4986         kfree(rsv);
4987 }
4988
4989 int btrfs_block_rsv_add(struct btrfs_root *root,
4990                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4991                         enum btrfs_reserve_flush_enum flush)
4992 {
4993         int ret;
4994
4995         if (num_bytes == 0)
4996                 return 0;
4997
4998         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4999         if (!ret) {
5000                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5001                 return 0;
5002         }
5003
5004         return ret;
5005 }
5006
5007 int btrfs_block_rsv_check(struct btrfs_root *root,
5008                           struct btrfs_block_rsv *block_rsv, int min_factor)
5009 {
5010         u64 num_bytes = 0;
5011         int ret = -ENOSPC;
5012
5013         if (!block_rsv)
5014                 return 0;
5015
5016         spin_lock(&block_rsv->lock);
5017         num_bytes = div_factor(block_rsv->size, min_factor);
5018         if (block_rsv->reserved >= num_bytes)
5019                 ret = 0;
5020         spin_unlock(&block_rsv->lock);
5021
5022         return ret;
5023 }
5024
5025 int btrfs_block_rsv_refill(struct btrfs_root *root,
5026                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5027                            enum btrfs_reserve_flush_enum flush)
5028 {
5029         u64 num_bytes = 0;
5030         int ret = -ENOSPC;
5031
5032         if (!block_rsv)
5033                 return 0;
5034
5035         spin_lock(&block_rsv->lock);
5036         num_bytes = min_reserved;
5037         if (block_rsv->reserved >= num_bytes)
5038                 ret = 0;
5039         else
5040                 num_bytes -= block_rsv->reserved;
5041         spin_unlock(&block_rsv->lock);
5042
5043         if (!ret)
5044                 return 0;
5045
5046         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5047         if (!ret) {
5048                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5049                 return 0;
5050         }
5051
5052         return ret;
5053 }
5054
5055 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5056                             struct btrfs_block_rsv *dst_rsv,
5057                             u64 num_bytes)
5058 {
5059         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5060 }
5061
5062 void btrfs_block_rsv_release(struct btrfs_root *root,
5063                              struct btrfs_block_rsv *block_rsv,
5064                              u64 num_bytes)
5065 {
5066         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5067         if (global_rsv == block_rsv ||
5068             block_rsv->space_info != global_rsv->space_info)
5069                 global_rsv = NULL;
5070         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5071                                 num_bytes);
5072 }
5073
5074 /*
5075  * Helper to calculate the size of the global block reservation.
5076  * The desired value is the sum of the space used by the extent tree,
5077  * checksum tree and root tree.
5078  */
5079 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5080 {
5081         struct btrfs_space_info *sinfo;
5082         u64 num_bytes;
5083         u64 meta_used;
5084         u64 data_used;
5085         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5086
5087         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5088         spin_lock(&sinfo->lock);
5089         data_used = sinfo->bytes_used;
5090         spin_unlock(&sinfo->lock);
5091
5092         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5093         spin_lock(&sinfo->lock);
5094         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5095                 data_used = 0;
5096         meta_used = sinfo->bytes_used;
5097         spin_unlock(&sinfo->lock);
5098
5099         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5100                     csum_size * 2;
5101         num_bytes += div_u64(data_used + meta_used, 50);
5102
5103         if (num_bytes * 3 > meta_used)
5104                 num_bytes = div_u64(meta_used, 3);
5105
5106         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5107 }
5108
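/*
 * Resize the global block reserve to the calculated metadata size (capped
 * at 512M) and top up its reserved bytes from whatever space is still
 * free in the metadata space_info.
 */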
5109 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5110 {
5111         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5112         struct btrfs_space_info *sinfo = block_rsv->space_info;
5113         u64 num_bytes;
5114
5115         num_bytes = calc_global_metadata_size(fs_info);
5116
5117         spin_lock(&sinfo->lock);
5118         spin_lock(&block_rsv->lock);
5119
5120         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5121
5122         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5123                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5124                     sinfo->bytes_may_use;
5125
5126         if (sinfo->total_bytes > num_bytes) {
5127                 num_bytes = sinfo->total_bytes - num_bytes;
5128                 block_rsv->reserved += num_bytes;
5129                 sinfo->bytes_may_use += num_bytes;
5130                 trace_btrfs_space_reservation(fs_info, "space_info",
5131                                       sinfo->flags, num_bytes, 1);
5132         }
5133
5134         if (block_rsv->reserved >= block_rsv->size) {
5135                 num_bytes = block_rsv->reserved - block_rsv->size;
5136                 sinfo->bytes_may_use -= num_bytes;
5137                 trace_btrfs_space_reservation(fs_info, "space_info",
5138                                       sinfo->flags, num_bytes, 0);
5139                 block_rsv->reserved = block_rsv->size;
5140                 block_rsv->full = 1;
5141         }
5142
5143         spin_unlock(&block_rsv->lock);
5144         spin_unlock(&sinfo->lock);
5145 }
5146
5147 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5148 {
5149         struct btrfs_space_info *space_info;
5150
5151         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5152         fs_info->chunk_block_rsv.space_info = space_info;
5153
5154         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5155         fs_info->global_block_rsv.space_info = space_info;
5156         fs_info->delalloc_block_rsv.space_info = space_info;
5157         fs_info->trans_block_rsv.space_info = space_info;
5158         fs_info->empty_block_rsv.space_info = space_info;
5159         fs_info->delayed_block_rsv.space_info = space_info;
5160
5161         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5162         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5163         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5164         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5165         if (fs_info->quota_root)
5166                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5167         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5168
5169         update_global_block_rsv(fs_info);
5170 }
5171
5172 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5173 {
5174         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5175                                 (u64)-1);
5176         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5177         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5178         WARN_ON(fs_info->trans_block_rsv.size > 0);
5179         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5180         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5181         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5182         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5183         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5184 }
5185
5186 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5187                                   struct btrfs_root *root)
5188 {
5189         if (!trans->block_rsv)
5190                 return;
5191
5192         if (!trans->bytes_reserved)
5193                 return;
5194
5195         trace_btrfs_space_reservation(root->fs_info, "transaction",
5196                                       trans->transid, trans->bytes_reserved, 0);
5197         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5198         trans->bytes_reserved = 0;
5199 }
5200
5201 /*
5202  * To be called after all the new block groups attached to the transaction
5203  * handle have been created (btrfs_create_pending_block_groups()).
5204  */
5205 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5206 {
5207         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5208
5209         if (!trans->chunk_bytes_reserved)
5210                 return;
5211
5212         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5213
5214         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5215                                 trans->chunk_bytes_reserved);
5216         trans->chunk_bytes_reserved = 0;
5217 }
5218
5219 /* Can only return 0 or -ENOSPC */
5220 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5221                                   struct inode *inode)
5222 {
5223         struct btrfs_root *root = BTRFS_I(inode)->root;
5224         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5225         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5226
5227         /*
5228          * We need to hold space in order to delete our orphan item once we've
5229          * added it, so this takes the reservation now so that we can release
5230          * it later, when we are truly done with the orphan item.
5231          */
5232         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5233         trace_btrfs_space_reservation(root->fs_info, "orphan",
5234                                       btrfs_ino(inode), num_bytes, 1);
5235         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5236 }
5237
5238 void btrfs_orphan_release_metadata(struct inode *inode)
5239 {
5240         struct btrfs_root *root = BTRFS_I(inode)->root;
5241         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5242         trace_btrfs_space_reservation(root->fs_info, "orphan",
5243                                       btrfs_ino(inode), num_bytes, 0);
5244         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5245 }
5246
5247 /*
5248  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5249  * root: the root of the parent directory
5250  * rsv: block reservation
5251  * items: the number of items that we need to reserve for
5252  * qgroup_reserved: used to return the reserved size in qgroup
5253  *
5254  * This function is used to reserve the space for snapshot/subvolume
5255  * creation and deletion. Those operations differ from the common
5256  * file/directory operations in that they change two fs/file trees
5257  * and the root tree; the number of items that the qgroup reserves
5258  * differs from the free space reservation. So we can not use the
5259  * space reservation mechanism in start_transaction().
5260  */
5261 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5262                                      struct btrfs_block_rsv *rsv,
5263                                      int items,
5264                                      u64 *qgroup_reserved,
5265                                      bool use_global_rsv)
5266 {
5267         u64 num_bytes;
5268         int ret;
5269         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5270
5271         if (root->fs_info->quota_enabled) {
5272                 /* One for parent inode, two for dir entries */
5273                 num_bytes = 3 * root->nodesize;
5274                 ret = btrfs_qgroup_reserve(root, num_bytes);
5275                 if (ret)
5276                         return ret;
5277         } else {
5278                 num_bytes = 0;
5279         }
5280
5281         *qgroup_reserved = num_bytes;
5282
5283         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5284         rsv->space_info = __find_space_info(root->fs_info,
5285                                             BTRFS_BLOCK_GROUP_METADATA);
5286         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5287                                   BTRFS_RESERVE_FLUSH_ALL);
5288
5289         if (ret == -ENOSPC && use_global_rsv)
5290                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5291
5292         if (ret) {
5293                 if (*qgroup_reserved)
5294                         btrfs_qgroup_free(root, *qgroup_reserved);
5295         }
5296
5297         return ret;
5298 }
5299
5300 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5301                                       struct btrfs_block_rsv *rsv,
5302                                       u64 qgroup_reserved)
5303 {
5304         btrfs_block_rsv_release(root, rsv, (u64)-1);
5305 }
5306
5307 /**
5308  * drop_outstanding_extent - drop an outstanding extent
5309  * @inode: the inode we're dropping the extent for
5310  * @num_bytes: the number of bytes we're releasing.
5311  *
5312  * This is called when we are freeing up an outstanding extent, either called
5313  * after an error or after an extent is written.  This will return the number of
5314  * reserved extents that need to be freed.  This must be called with
5315  * BTRFS_I(inode)->lock held.
5316  */
5317 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5318 {
5319         unsigned drop_inode_space = 0;
5320         unsigned dropped_extents = 0;
5321         unsigned num_extents = 0;
5322
5323         num_extents = (unsigned)div64_u64(num_bytes +
5324                                           BTRFS_MAX_EXTENT_SIZE - 1,
5325                                           BTRFS_MAX_EXTENT_SIZE);
5326         ASSERT(num_extents);
5327         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5328         BTRFS_I(inode)->outstanding_extents -= num_extents;
5329
5330         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5331             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5332                                &BTRFS_I(inode)->runtime_flags))
5333                 drop_inode_space = 1;
5334
5335         /*
5336          * If we have at least as many outstanding extents as we have
5337          * reserved then we need to leave the reserved extents count alone.
5338          */
5339         if (BTRFS_I(inode)->outstanding_extents >=
5340             BTRFS_I(inode)->reserved_extents)
5341                 return drop_inode_space;
5342
5343         dropped_extents = BTRFS_I(inode)->reserved_extents -
5344                 BTRFS_I(inode)->outstanding_extents;
5345         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5346         return dropped_extents + drop_inode_space;
5347 }
5348
5349 /**
5350  * calc_csum_metadata_size - return the amount of metadata space that must be
5351  *      reserved/freed for the given bytes.
5352  * @inode: the inode we're manipulating
5353  * @num_bytes: the number of bytes in question
5354  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5355  *
5356  * This adjusts the number of csum_bytes in the inode and then returns the
5357  * correct amount of metadata that must either be reserved or freed.  We
5358  * calculate how many checksums we can fit into one leaf and then divide the
5359  * number of bytes that will need to be checksumed by this value to figure out
5360  * how many checksums will be required.  If we are adding bytes then the number
5361  * may go up and we will return the number of additional bytes that must be
5362  * reserved.  If it is going down we will return the number of bytes that must
5363  * be freed.
5364  *
5365  * This must be called with BTRFS_I(inode)->lock held.
5366  */
5367 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5368                                    int reserve)
5369 {
5370         struct btrfs_root *root = BTRFS_I(inode)->root;
5371         u64 old_csums, num_csums;
5372
5373         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5374             BTRFS_I(inode)->csum_bytes == 0)
5375                 return 0;
5376
5377         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5378         if (reserve)
5379                 BTRFS_I(inode)->csum_bytes += num_bytes;
5380         else
5381                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5382         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
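        /*
         * Example: if the old csum_bytes fit in 3 leaves of csum items and
         * the new total needs 5, we must reserve metadata space for the 2
         * extra leaves (illustrative numbers).
         */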
5383
5384         /* No change, no need to reserve more */
5385         if (old_csums == num_csums)
5386                 return 0;
5387
5388         if (reserve)
5389                 return btrfs_calc_trans_metadata_size(root,
5390                                                       num_csums - old_csums);
5391
5392         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5393 }
5394
5395 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5396 {
5397         struct btrfs_root *root = BTRFS_I(inode)->root;
5398         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5399         u64 to_reserve = 0;
5400         u64 csum_bytes;
5401         unsigned nr_extents = 0;
5402         int extra_reserve = 0;
5403         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5404         int ret = 0;
5405         bool delalloc_lock = true;
5406         u64 to_free = 0;
5407         unsigned dropped;
5408
5409         /* If we are a free space inode we need to not flush since we will be in
5410          * the middle of a transaction commit.  We also don't need the delalloc
5411          * mutex since we won't race with anybody.  We need this mostly to make
5412          * lockdep shut its filthy mouth.
5413          */
5414         if (btrfs_is_free_space_inode(inode)) {
5415                 flush = BTRFS_RESERVE_NO_FLUSH;
5416                 delalloc_lock = false;
5417         }
5418
5419         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5420             btrfs_transaction_in_commit(root->fs_info))
5421                 schedule_timeout(1);
5422
5423         if (delalloc_lock)
5424                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5425
5426         num_bytes = ALIGN(num_bytes, root->sectorsize);
5427
5428         spin_lock(&BTRFS_I(inode)->lock);
5429         nr_extents = (unsigned)div64_u64(num_bytes +
5430                                          BTRFS_MAX_EXTENT_SIZE - 1,
5431                                          BTRFS_MAX_EXTENT_SIZE);
5432         BTRFS_I(inode)->outstanding_extents += nr_extents;
5433         nr_extents = 0;
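        /* Recompute nr_extents as the number of extents not yet reserved. */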
5434
5435         if (BTRFS_I(inode)->outstanding_extents >
5436             BTRFS_I(inode)->reserved_extents)
5437                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5438                         BTRFS_I(inode)->reserved_extents;
5439
5440         /*
5441          * Add an item to reserve for updating the inode when we complete the
5442          * delalloc io.
5443          */
5444         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5445                       &BTRFS_I(inode)->runtime_flags)) {
5446                 nr_extents++;
5447                 extra_reserve = 1;
5448         }
5449
5450         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5451         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5452         csum_bytes = BTRFS_I(inode)->csum_bytes;
5453         spin_unlock(&BTRFS_I(inode)->lock);
5454
5455         if (root->fs_info->quota_enabled) {
5456                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5457                 if (ret)
5458                         goto out_fail;
5459         }
5460
5461         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5462         if (unlikely(ret)) {
5463                 if (root->fs_info->quota_enabled)
5464                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5465                 goto out_fail;
5466         }
5467
5468         spin_lock(&BTRFS_I(inode)->lock);
5469         if (extra_reserve) {
5470                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5471                         &BTRFS_I(inode)->runtime_flags);
5472                 nr_extents--;
5473         }
5474         BTRFS_I(inode)->reserved_extents += nr_extents;
5475         spin_unlock(&BTRFS_I(inode)->lock);
5476
5477         if (delalloc_lock)
5478                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5479
5480         if (to_reserve)
5481                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5482                                               btrfs_ino(inode), to_reserve, 1);
5483         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5484
5485         return 0;
5486
5487 out_fail:
5488         spin_lock(&BTRFS_I(inode)->lock);
5489         dropped = drop_outstanding_extent(inode, num_bytes);
5490         /*
5491          * If the inode's csum_bytes is the same as the original
5492          * csum_bytes then we know we haven't raced with any free()ers
5493          * so we can just reduce our inode's csum bytes and carry on.
5494          */
5495         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5496                 calc_csum_metadata_size(inode, num_bytes, 0);
5497         } else {
5498                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5499                 u64 bytes;
5500
5501                 /*
5502                  * This is tricky, but first we need to figure out how much we
5503                  * free'd from any free-ers that occurred during this
5504                  * reservation, so we reset ->csum_bytes to the csum_bytes
5505                  * before we dropped our lock, and then call the free for the
5506                  * number of bytes that were freed while we were trying our
5507                  * reservation.
5508                  */
5509                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5510                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5511                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5512
5514                 /*
5515                  * Now we need to see how much we would have freed had we not
5516                  * been making this reservation and our ->csum_bytes were not
5517                  * artificially inflated.
5518                  */
5519                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5520                 bytes = csum_bytes - orig_csum_bytes;
5521                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5522
5523                 /*
5524                  * Now reset ->csum_bytes to what it should be.  If bytes is
5525                  * more than to_free then we would have free'd more space had we
5526                  * not had an artificially high ->csum_bytes, so we need to free
5527                  * the remainder.  If bytes is the same or less then we don't
5528                  * need to do anything, the other free-ers did the correct
5529                  * thing.
5530                  */
5531                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5532                 if (bytes > to_free)
5533                         to_free = bytes - to_free;
5534                 else
5535                         to_free = 0;
5536         }
5537         spin_unlock(&BTRFS_I(inode)->lock);
5538         if (dropped)
5539                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5540
5541         if (to_free) {
5542                 btrfs_block_rsv_release(root, block_rsv, to_free);
5543                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5544                                               btrfs_ino(inode), to_free, 0);
5545         }
5546         if (delalloc_lock)
5547                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5548         return ret;
5549 }
5550
5551 /**
5552  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5553  * @inode: the inode to release the reservation for
5554  * @num_bytes: the number of bytes we're releasing
5555  *
5556  * This will release the metadata reservation for an inode.  This can be called
5557  * once we complete IO for a given set of bytes to release their metadata
5558  * reservations.
5559  */
5560 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5561 {
5562         struct btrfs_root *root = BTRFS_I(inode)->root;
5563         u64 to_free = 0;
5564         unsigned dropped;
5565
5566         num_bytes = ALIGN(num_bytes, root->sectorsize);
5567         spin_lock(&BTRFS_I(inode)->lock);
5568         dropped = drop_outstanding_extent(inode, num_bytes);
5569
5570         if (num_bytes)
5571                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5572         spin_unlock(&BTRFS_I(inode)->lock);
5573         if (dropped > 0)
5574                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5575
5576         if (btrfs_test_is_dummy_root(root))
5577                 return;
5578
5579         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5580                                       btrfs_ino(inode), to_free, 0);
5581
5582         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5583                                 to_free);
5584 }
5585
5586 /**
5587  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5588  * @inode: inode we're writing to
5589  * @num_bytes: the number of bytes we want to allocate
5590  *
5591  * This will do the following things
5592  *
5593  * o reserve space in the data space info for num_bytes
5594  * o reserve space in the metadata space info based on number of outstanding
5595  *   extents and how much csums will be needed
5596  * o add to the inode's ->delalloc_bytes
5597  * o add it to the fs_info's delalloc inodes list.
5598  *
5599  * This will return 0 for success and -ENOSPC if there is no space left.
5600  */
5601 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5602 {
5603         int ret;
5604
5605         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5606         if (ret)
5607                 return ret;
5608
5609         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5610         if (ret) {
5611                 btrfs_free_reserved_data_space(inode, num_bytes);
5612                 return ret;
5613         }
5614
5615         return 0;
5616 }
5617
5618 /**
5619  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5620  * @inode: inode we're releasing space for
5621  * @num_bytes: the number of bytes we want to free up
5622  *
5623  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5624  * called in the case that we don't need the metadata AND data reservations
5625  * anymore, e.g. after an error or after we insert an inline extent.
5626  *
5627  * This function will release the metadata space that was not used and will
5628  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5629  * list if there are no delalloc bytes left.
5630  */
5631 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5632 {
5633         btrfs_delalloc_release_metadata(inode, num_bytes);
5634         btrfs_free_reserved_data_space(inode, num_bytes);
5635 }
5636
5637 static int update_block_group(struct btrfs_trans_handle *trans,
5638                               struct btrfs_root *root, u64 bytenr,
5639                               u64 num_bytes, int alloc)
5640 {
5641         struct btrfs_block_group_cache *cache = NULL;
5642         struct btrfs_fs_info *info = root->fs_info;
5643         u64 total = num_bytes;
5644         u64 old_val;
5645         u64 byte_in_group;
5646         int factor;
5647
5648         /* block accounting for super block */
5649         spin_lock(&info->delalloc_root_lock);
5650         old_val = btrfs_super_bytes_used(info->super_copy);
5651         if (alloc)
5652                 old_val += num_bytes;
5653         else
5654                 old_val -= num_bytes;
5655         btrfs_set_super_bytes_used(info->super_copy, old_val);
5656         spin_unlock(&info->delalloc_root_lock);
5657
5658         while (total) {
5659                 cache = btrfs_lookup_block_group(info, bytenr);
5660                 if (!cache)
5661                         return -ENOENT;
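                /*
                 * DUP/RAID1/RAID10 keep two copies of every byte, so raw
                 * disk usage moves by twice the logical byte count.
                 */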
5662                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5663                                     BTRFS_BLOCK_GROUP_RAID1 |
5664                                     BTRFS_BLOCK_GROUP_RAID10))
5665                         factor = 2;
5666                 else
5667                         factor = 1;
5668                 /*
5669                  * If this block group has free space cache written out, we
5670                  * need to make sure to load it if we are removing space.  This
5671                  * is because we need the unpinning stage to actually add the
5672                  * space back to the block group, otherwise we will leak space.
5673                  */
5674                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5675                         cache_block_group(cache, 1);
5676
5677                 byte_in_group = bytenr - cache->key.objectid;
5678                 WARN_ON(byte_in_group > cache->key.offset);
5679
5680                 spin_lock(&cache->space_info->lock);
5681                 spin_lock(&cache->lock);
5682
5683                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5684                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5685                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5686
5687                 old_val = btrfs_block_group_used(&cache->item);
5688                 num_bytes = min(total, cache->key.offset - byte_in_group);
5689                 if (alloc) {
5690                         old_val += num_bytes;
5691                         btrfs_set_block_group_used(&cache->item, old_val);
5692                         cache->reserved -= num_bytes;
5693                         cache->space_info->bytes_reserved -= num_bytes;
5694                         cache->space_info->bytes_used += num_bytes;
5695                         cache->space_info->disk_used += num_bytes * factor;
5696                         spin_unlock(&cache->lock);
5697                         spin_unlock(&cache->space_info->lock);
5698                 } else {
5699                         old_val -= num_bytes;
5700                         btrfs_set_block_group_used(&cache->item, old_val);
5701                         cache->pinned += num_bytes;
5702                         cache->space_info->bytes_pinned += num_bytes;
5703                         cache->space_info->bytes_used -= num_bytes;
5704                         cache->space_info->disk_used -= num_bytes * factor;
5705                         spin_unlock(&cache->lock);
5706                         spin_unlock(&cache->space_info->lock);
5707
5708                         set_extent_dirty(info->pinned_extents,
5709                                          bytenr, bytenr + num_bytes - 1,
5710                                          GFP_NOFS | __GFP_NOFAIL);
5711                         /*
5712                          * No longer have used bytes in this block group, queue
5713                          * it for deletion.
5714                          */
5715                         if (old_val == 0) {
5716                                 spin_lock(&info->unused_bgs_lock);
5717                                 if (list_empty(&cache->bg_list)) {
5718                                         btrfs_get_block_group(cache);
5719                                         list_add_tail(&cache->bg_list,
5720                                                       &info->unused_bgs);
5721                                 }
5722                                 spin_unlock(&info->unused_bgs_lock);
5723                         }
5724                 }
5725
5726                 spin_lock(&trans->transaction->dirty_bgs_lock);
5727                 if (list_empty(&cache->dirty_list)) {
5728                         list_add_tail(&cache->dirty_list,
5729                                       &trans->transaction->dirty_bgs);
5730                         trans->transaction->num_dirty_bgs++;
5731                         btrfs_get_block_group(cache);
5732                 }
5733                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5734
5735                 btrfs_put_block_group(cache);
5736                 total -= num_bytes;
5737                 bytenr += num_bytes;
5738         }
5739         return 0;
5740 }
5741
5742 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5743 {
5744         struct btrfs_block_group_cache *cache;
5745         u64 bytenr;
5746
5747         spin_lock(&root->fs_info->block_group_cache_lock);
5748         bytenr = root->fs_info->first_logical_byte;
5749         spin_unlock(&root->fs_info->block_group_cache_lock);
5750
5751         if (bytenr < (u64)-1)
5752                 return bytenr;
5753
5754         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5755         if (!cache)
5756                 return 0;
5757
5758         bytenr = cache->key.objectid;
5759         btrfs_put_block_group(cache);
5760
5761         return bytenr;
5762 }
5763
5764 static int pin_down_extent(struct btrfs_root *root,
5765                            struct btrfs_block_group_cache *cache,
5766                            u64 bytenr, u64 num_bytes, int reserved)
5767 {
5768         spin_lock(&cache->space_info->lock);
5769         spin_lock(&cache->lock);
5770         cache->pinned += num_bytes;
5771         cache->space_info->bytes_pinned += num_bytes;
5772         if (reserved) {
5773                 cache->reserved -= num_bytes;
5774                 cache->space_info->bytes_reserved -= num_bytes;
5775         }
5776         spin_unlock(&cache->lock);
5777         spin_unlock(&cache->space_info->lock);
5778
5779         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5780                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5781         if (reserved)
5782                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5783         return 0;
5784 }
5785
5786 /*
5787  * this function must be called within a transaction
5788  */
5789 int btrfs_pin_extent(struct btrfs_root *root,
5790                      u64 bytenr, u64 num_bytes, int reserved)
5791 {
5792         struct btrfs_block_group_cache *cache;
5793
5794         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5795         BUG_ON(!cache); /* Logic error */
5796
5797         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5798
5799         btrfs_put_block_group(cache);
5800         return 0;
5801 }
5802
5803 /*
5804  * this function must be called within a transaction
5805  */
5806 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5807                                     u64 bytenr, u64 num_bytes)
5808 {
5809         struct btrfs_block_group_cache *cache;
5810         int ret;
5811
5812         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5813         if (!cache)
5814                 return -EINVAL;
5815
5816         /*
5817          * pull in the free space cache (if any) so that our pin
5818          * removes the free space from the cache.  We have load_only set
5819          * to one because the slow code to read in the free extents does check
5820          * the pinned extents.
5821          */
5822         cache_block_group(cache, 1);
5823
5824         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5825
5826         /* remove us from the free space cache (if we're there at all) */
5827         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5828         btrfs_put_block_group(cache);
5829         return ret;
5830 }
5831
5832 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5833 {
5834         int ret;
5835         struct btrfs_block_group_cache *block_group;
5836         struct btrfs_caching_control *caching_ctl;
5837
5838         block_group = btrfs_lookup_block_group(root->fs_info, start);
5839         if (!block_group)
5840                 return -EINVAL;
5841
5842         cache_block_group(block_group, 0);
5843         caching_ctl = get_caching_control(block_group);
5844
5845         if (!caching_ctl) {
5846                 /* Logic error */
5847                 BUG_ON(!block_group_cache_done(block_group));
5848                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5849         } else {
5850                 mutex_lock(&caching_ctl->mutex);
5851
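                /*
                 * The caching thread has scanned up to ->progress: ranges
                 * beyond it only need to be marked excluded, ranges behind
                 * it must come out of the free space cache, and a range
                 * straddling it is split between the two treatments.
                 */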
5852                 if (start >= caching_ctl->progress) {
5853                         ret = add_excluded_extent(root, start, num_bytes);
5854                 } else if (start + num_bytes <= caching_ctl->progress) {
5855                         ret = btrfs_remove_free_space(block_group,
5856                                                       start, num_bytes);
5857                 } else {
5858                         num_bytes = caching_ctl->progress - start;
5859                         ret = btrfs_remove_free_space(block_group,
5860                                                       start, num_bytes);
5861                         if (ret)
5862                                 goto out_lock;
5863
5864                         num_bytes = (start + num_bytes) -
5865                                 caching_ctl->progress;
5866                         start = caching_ctl->progress;
5867                         ret = add_excluded_extent(root, start, num_bytes);
5868                 }
5869 out_lock:
5870                 mutex_unlock(&caching_ctl->mutex);
5871                 put_caching_control(caching_ctl);
5872         }
5873         btrfs_put_block_group(block_group);
5874         return ret;
5875 }
5876
5877 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5878                                  struct extent_buffer *eb)
5879 {
5880         struct btrfs_file_extent_item *item;
5881         struct btrfs_key key;
5882         int found_type;
5883         int i;
5884
5885         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5886                 return 0;
5887
5888         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5889                 btrfs_item_key_to_cpu(eb, &key, i);
5890                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5891                         continue;
5892                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5893                 found_type = btrfs_file_extent_type(eb, item);
5894                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5895                         continue;
5896                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5897                         continue;
5898                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5899                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5900                 __exclude_logged_extent(log, key.objectid, key.offset);
5901         }
5902
5903         return 0;
5904 }
5905
5906 /**
5907  * btrfs_update_reserved_bytes - update the block_group and space info counters
5908  * @cache:      The cache we are manipulating
5909  * @num_bytes:  The number of bytes in question
5910  * @reserve:    One of the reservation enums
5911  * @delalloc:   The blocks are allocated for the delalloc write
5912  *
5913  * This is called by the allocator when it reserves space, or by somebody who is
5914  * freeing space that was never actually used on disk.  For example if you
5915  * reserve some space for a new leaf in transaction A and before transaction A
5916  * commits you free that leaf, you call this with reserve set to 0 in order to
5917  * clear the reservation.
5918  *
5919  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5920  * ENOSPC accounting.  For data we handle the reservation through clearing the
5921  * delalloc bits in the io_tree.  We have to do this since we could end up
5922  * allocating less disk space for the amount of data we have reserved in the
5923  * case of compression.
5924  *
5925  * If this is a reservation and the block group has become read only we cannot
5926  * make the reservation and return -EAGAIN, otherwise this function always
5927  * succeeds.
5928  */
5929 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5930                                        u64 num_bytes, int reserve, int delalloc)
5931 {
5932         struct btrfs_space_info *space_info = cache->space_info;
5933         int ret = 0;
5934
5935         spin_lock(&space_info->lock);
5936         spin_lock(&cache->lock);
5937         if (reserve != RESERVE_FREE) {
5938                 if (cache->ro) {
5939                         ret = -EAGAIN;
5940                 } else {
5941                         cache->reserved += num_bytes;
5942                         space_info->bytes_reserved += num_bytes;
5943                         if (reserve == RESERVE_ALLOC) {
5944                                 trace_btrfs_space_reservation(cache->fs_info,
5945                                                 "space_info", space_info->flags,
5946                                                 num_bytes, 0);
5947                                 space_info->bytes_may_use -= num_bytes;
5948                         }
5949
5950                         if (delalloc)
5951                                 cache->delalloc_bytes += num_bytes;
5952                 }
5953         } else {
5954                 if (cache->ro)
5955                         space_info->bytes_readonly += num_bytes;
5956                 cache->reserved -= num_bytes;
5957                 space_info->bytes_reserved -= num_bytes;
5958
5959                 if (delalloc)
5960                         cache->delalloc_bytes -= num_bytes;
5961         }
5962         spin_unlock(&cache->lock);
5963         spin_unlock(&space_info->lock);
5964         return ret;
5965 }
5966
5967 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5968                                 struct btrfs_root *root)
5969 {
5970         struct btrfs_fs_info *fs_info = root->fs_info;
5971         struct btrfs_caching_control *next;
5972         struct btrfs_caching_control *caching_ctl;
5973         struct btrfs_block_group_cache *cache;
5974
5975         down_write(&fs_info->commit_root_sem);
5976
5977         list_for_each_entry_safe(caching_ctl, next,
5978                                  &fs_info->caching_block_groups, list) {
5979                 cache = caching_ctl->block_group;
5980                 if (block_group_cache_done(cache)) {
5981                         cache->last_byte_to_unpin = (u64)-1;
5982                         list_del_init(&caching_ctl->list);
5983                         put_caching_control(caching_ctl);
5984                 } else {
5985                         cache->last_byte_to_unpin = caching_ctl->progress;
5986                 }
5987         }
5988
5989         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5990                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5991         else
5992                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5993
5994         up_write(&fs_info->commit_root_sem);
5995
5996         update_global_block_rsv(fs_info);
5997 }
5998
5999 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6000                               const bool return_free_space)
6001 {
6002         struct btrfs_fs_info *fs_info = root->fs_info;
6003         struct btrfs_block_group_cache *cache = NULL;
6004         struct btrfs_space_info *space_info;
6005         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6006         u64 len;
6007         bool readonly;
6008
6009         while (start <= end) {
6010                 readonly = false;
6011                 if (!cache ||
6012                     start >= cache->key.objectid + cache->key.offset) {
6013                         if (cache)
6014                                 btrfs_put_block_group(cache);
6015                         cache = btrfs_lookup_block_group(fs_info, start);
6016                         BUG_ON(!cache); /* Logic error */
6017                 }
6018
6019                 len = cache->key.objectid + cache->key.offset - start;
6020                 len = min(len, end + 1 - start);
6021
6022                 if (start < cache->last_byte_to_unpin) {
6023                         len = min(len, cache->last_byte_to_unpin - start);
6024                         if (return_free_space)
6025                                 btrfs_add_free_space(cache, start, len);
6026                 }
6027
6028                 start += len;
6029                 space_info = cache->space_info;
6030
6031                 spin_lock(&space_info->lock);
6032                 spin_lock(&cache->lock);
6033                 cache->pinned -= len;
6034                 space_info->bytes_pinned -= len;
6035                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6036                 if (cache->ro) {
6037                         space_info->bytes_readonly += len;
6038                         readonly = true;
6039                 }
6040                 spin_unlock(&cache->lock);
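                /*
                 * Use freshly unpinned metadata space to top up the global
                 * block reserve before handing it back to the space_info.
                 */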
6041                 if (!readonly && global_rsv->space_info == space_info) {
6042                         spin_lock(&global_rsv->lock);
6043                         if (!global_rsv->full) {
6044                                 len = min(len, global_rsv->size -
6045                                           global_rsv->reserved);
6046                                 global_rsv->reserved += len;
6047                                 space_info->bytes_may_use += len;
6048                                 if (global_rsv->reserved >= global_rsv->size)
6049                                         global_rsv->full = 1;
6050                         }
6051                         spin_unlock(&global_rsv->lock);
6052                 }
6053                 spin_unlock(&space_info->lock);
6054         }
6055
6056         if (cache)
6057                 btrfs_put_block_group(cache);
6058         return 0;
6059 }
6060
6061 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6062                                struct btrfs_root *root)
6063 {
6064         struct btrfs_fs_info *fs_info = root->fs_info;
6065         struct extent_io_tree *unpin;
6066         u64 start;
6067         u64 end;
6068         int ret;
6069
6070         if (trans->aborted)
6071                 return 0;
6072
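        /*
         * btrfs_prepare_extent_commit switched ->pinned_extents to the
         * other tree, so the set we unpin here is no longer being added to.
         */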
6073         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6074                 unpin = &fs_info->freed_extents[1];
6075         else
6076                 unpin = &fs_info->freed_extents[0];
6077
6078         while (1) {
6079                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6080                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6081                                             EXTENT_DIRTY, NULL);
6082                 if (ret) {
6083                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6084                         break;
6085                 }
6086
6087                 if (btrfs_test_opt(root, DISCARD))
6088                         ret = btrfs_discard_extent(root, start,
6089                                                    end + 1 - start, NULL);
6090
6091                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6092                 unpin_extent_range(root, start, end, true);
6093                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6094                 cond_resched();
6095         }
6096
6097         return 0;
6098 }
6099
6100 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6101                              u64 owner, u64 root_objectid)
6102 {
6103         struct btrfs_space_info *space_info;
6104         u64 flags;
6105
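        /*
         * Owners below BTRFS_FIRST_FREE_OBJECTID are tree blocks: they
         * count against metadata (or the chunk tree's system) space info,
         * anything else is file data.
         */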
6106         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6107                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6108                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6109                 else
6110                         flags = BTRFS_BLOCK_GROUP_METADATA;
6111         } else {
6112                 flags = BTRFS_BLOCK_GROUP_DATA;
6113         }
6114
6115         space_info = __find_space_info(fs_info, flags);
6116         BUG_ON(!space_info); /* Logic bug */
6117         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6118 }
6119
6121 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6122                                 struct btrfs_root *root,
6123                                 struct btrfs_delayed_ref_node *node, u64 parent,
6124                                 u64 root_objectid, u64 owner_objectid,
6125                                 u64 owner_offset, int refs_to_drop,
6126                                 struct btrfs_delayed_extent_op *extent_op)
6127 {
6128         struct btrfs_key key;
6129         struct btrfs_path *path;
6130         struct btrfs_fs_info *info = root->fs_info;
6131         struct btrfs_root *extent_root = info->extent_root;
6132         struct extent_buffer *leaf;
6133         struct btrfs_extent_item *ei;
6134         struct btrfs_extent_inline_ref *iref;
6135         int ret;
6136         int is_data;
6137         int extent_slot = 0;
6138         int found_extent = 0;
6139         int num_to_del = 1;
6140         int no_quota = node->no_quota;
6141         u32 item_size;
6142         u64 refs;
6143         u64 bytenr = node->bytenr;
6144         u64 num_bytes = node->num_bytes;
6145         int last_ref = 0;
6146         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6147                                                  SKINNY_METADATA);
6148
6149         if (!info->quota_enabled || !is_fstree(root_objectid))
6150                 no_quota = 1;
6151
6152         path = btrfs_alloc_path();
6153         if (!path)
6154                 return -ENOMEM;
6155
6156         path->reada = 1;
6157         path->leave_spinning = 1;
6158
6159         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6160         BUG_ON(!is_data && refs_to_drop != 1);
6161
6162         if (is_data)
6163                 skinny_metadata = 0;
6164
6165         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6166                                     bytenr, num_bytes, parent,
6167                                     root_objectid, owner_objectid,
6168                                     owner_offset);
6169         if (ret == 0) {
6170                 extent_slot = path->slots[0];
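                /*
                 * The extent item is either at this slot (inline backref)
                 * or a few slots earlier (keyed backref), so scan
                 * backwards for it.
                 */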
6171                 while (extent_slot >= 0) {
6172                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6173                                               extent_slot);
6174                         if (key.objectid != bytenr)
6175                                 break;
6176                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6177                             key.offset == num_bytes) {
6178                                 found_extent = 1;
6179                                 break;
6180                         }
6181                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6182                             key.offset == owner_objectid) {
6183                                 found_extent = 1;
6184                                 break;
6185                         }
6186                         if (path->slots[0] - extent_slot > 5)
6187                                 break;
6188                         extent_slot--;
6189                 }
6190 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6191                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6192                 if (found_extent && item_size < sizeof(*ei))
6193                         found_extent = 0;
6194 #endif
6195                 if (!found_extent) {
6196                         BUG_ON(iref);
6197                         ret = remove_extent_backref(trans, extent_root, path,
6198                                                     NULL, refs_to_drop,
6199                                                     is_data, &last_ref);
6200                         if (ret) {
6201                                 btrfs_abort_transaction(trans, extent_root, ret);
6202                                 goto out;
6203                         }
6204                         btrfs_release_path(path);
6205                         path->leave_spinning = 1;
6206
6207                         key.objectid = bytenr;
6208                         key.type = BTRFS_EXTENT_ITEM_KEY;
6209                         key.offset = num_bytes;
6210
6211                         if (!is_data && skinny_metadata) {
6212                                 key.type = BTRFS_METADATA_ITEM_KEY;
6213                                 key.offset = owner_objectid;
6214                         }
6215
6216                         ret = btrfs_search_slot(trans, extent_root,
6217                                                 &key, path, -1, 1);
6218                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6219                                 /*
6220                                  * Couldn't find our skinny metadata item,
6221                                  * see if we have ye olde extent item.
6222                                  */
6223                                 path->slots[0]--;
6224                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6225                                                       path->slots[0]);
6226                                 if (key.objectid == bytenr &&
6227                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6228                                     key.offset == num_bytes)
6229                                         ret = 0;
6230                         }
6231
6232                         if (ret > 0 && skinny_metadata) {
6233                                 skinny_metadata = false;
6234                                 key.objectid = bytenr;
6235                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6236                                 key.offset = num_bytes;
6237                                 btrfs_release_path(path);
6238                                 ret = btrfs_search_slot(trans, extent_root,
6239                                                         &key, path, -1, 1);
6240                         }
6241
6242                         if (ret) {
6243                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6244                                         ret, bytenr);
6245                                 if (ret > 0)
6246                                         btrfs_print_leaf(extent_root,
6247                                                          path->nodes[0]);
6248                         }
6249                         if (ret < 0) {
6250                                 btrfs_abort_transaction(trans, extent_root, ret);
6251                                 goto out;
6252                         }
6253                         extent_slot = path->slots[0];
6254                 }
6255         } else if (WARN_ON(ret == -ENOENT)) {
6256                 btrfs_print_leaf(extent_root, path->nodes[0]);
6257                 btrfs_err(info,
6258                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6259                         bytenr, parent, root_objectid, owner_objectid,
6260                         owner_offset);
6261                 btrfs_abort_transaction(trans, extent_root, ret);
6262                 goto out;
6263         } else {
6264                 btrfs_abort_transaction(trans, extent_root, ret);
6265                 goto out;
6266         }
6267
6268         leaf = path->nodes[0];
6269         item_size = btrfs_item_size_nr(leaf, extent_slot);
6270 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6271         if (item_size < sizeof(*ei)) {
6272                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6273                 ret = convert_extent_item_v0(trans, extent_root, path,
6274                                              owner_objectid, 0);
6275                 if (ret < 0) {
6276                         btrfs_abort_transaction(trans, extent_root, ret);
6277                         goto out;
6278                 }
6279
6280                 btrfs_release_path(path);
6281                 path->leave_spinning = 1;
6282
6283                 key.objectid = bytenr;
6284                 key.type = BTRFS_EXTENT_ITEM_KEY;
6285                 key.offset = num_bytes;
6286
6287                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6288                                         -1, 1);
6289                 if (ret) {
6290                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6291                                 ret, bytenr);
6292                         btrfs_print_leaf(extent_root, path->nodes[0]);
6293                 }
6294                 if (ret < 0) {
6295                         btrfs_abort_transaction(trans, extent_root, ret);
6296                         goto out;
6297                 }
6298
6299                 extent_slot = path->slots[0];
6300                 leaf = path->nodes[0];
6301                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6302         }
6303 #endif
6304         BUG_ON(item_size < sizeof(*ei));
6305         ei = btrfs_item_ptr(leaf, extent_slot,
6306                             struct btrfs_extent_item);
6307         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6308             key.type == BTRFS_EXTENT_ITEM_KEY) {
6309                 struct btrfs_tree_block_info *bi;
6310                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6311                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6312                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6313         }
6314
6315         refs = btrfs_extent_refs(leaf, ei);
6316         if (refs < refs_to_drop) {
6317                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6318                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6319                 ret = -EINVAL;
6320                 btrfs_abort_transaction(trans, extent_root, ret);
6321                 goto out;
6322         }
6323         refs -= refs_to_drop;
6324
6325         if (refs > 0) {
6326                 if (extent_op)
6327                         __run_delayed_extent_op(extent_op, leaf, ei);
6328                 /*
6329                  * In the case of inline back ref, reference count will
6330                  * be updated by remove_extent_backref
6331                  */
6332                 if (iref) {
6333                         BUG_ON(!found_extent);
6334                 } else {
6335                         btrfs_set_extent_refs(leaf, ei, refs);
6336                         btrfs_mark_buffer_dirty(leaf);
6337                 }
6338                 if (found_extent) {
6339                         ret = remove_extent_backref(trans, extent_root, path,
6340                                                     iref, refs_to_drop,
6341                                                     is_data, &last_ref);
6342                         if (ret) {
6343                                 btrfs_abort_transaction(trans, extent_root, ret);
6344                                 goto out;
6345                         }
6346                 }
6347                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6348                                  root_objectid);
6349         } else {
6350                 if (found_extent) {
6351                         BUG_ON(is_data && refs_to_drop !=
6352                                extent_data_ref_count(root, path, iref));
6353                         if (iref) {
6354                                 BUG_ON(path->slots[0] != extent_slot);
6355                         } else {
6356                                 BUG_ON(path->slots[0] != extent_slot + 1);
6357                                 path->slots[0] = extent_slot;
6358                                 num_to_del = 2;
6359                         }
6360                 }
6361
6362                 last_ref = 1;
6363                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6364                                       num_to_del);
6365                 if (ret) {
6366                         btrfs_abort_transaction(trans, extent_root, ret);
6367                         goto out;
6368                 }
6369                 btrfs_release_path(path);
6370
6371                 if (is_data) {
6372                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6373                         if (ret) {
6374                                 btrfs_abort_transaction(trans, extent_root, ret);
6375                                 goto out;
6376                         }
6377                 }
6378
6379                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6380                 if (ret) {
6381                         btrfs_abort_transaction(trans, extent_root, ret);
6382                         goto out;
6383                 }
6384         }
6385         btrfs_release_path(path);
6386
6387 out:
6388         btrfs_free_path(path);
6389         return ret;
6390 }
6391
6392 /*
6393  * when we free a block, it is possible (and likely) that we free the last
6394  * delayed ref for that extent as well.  This searches the delayed ref tree for
6395  * a given extent, and if there are no other delayed refs to be processed, it
6396  * removes it from the tree.
6397  */
6398 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6399                                       struct btrfs_root *root, u64 bytenr)
6400 {
6401         struct btrfs_delayed_ref_head *head;
6402         struct btrfs_delayed_ref_root *delayed_refs;
6403         int ret = 0;
6404
6405         delayed_refs = &trans->transaction->delayed_refs;
6406         spin_lock(&delayed_refs->lock);
6407         head = btrfs_find_delayed_ref_head(trans, bytenr);
6408         if (!head)
6409                 goto out_delayed_unlock;
6410
6411         spin_lock(&head->lock);
6412         if (!list_empty(&head->ref_list))
6413                 goto out;
6414
6415         if (head->extent_op) {
6416                 if (!head->must_insert_reserved)
6417                         goto out;
6418                 btrfs_free_delayed_extent_op(head->extent_op);
6419                 head->extent_op = NULL;
6420         }
6421
6422         /*
6423          * waiting for the lock here would deadlock.  If someone else has it
6424          * locked they are already in the process of dropping it anyway
6425          */
6426         if (!mutex_trylock(&head->mutex))
6427                 goto out;
6428
6429         /*
6430          * at this point we have a head with no other entries.  Go
6431          * ahead and process it.
6432          */
6433         head->node.in_tree = 0;
6434         rb_erase(&head->href_node, &delayed_refs->href_root);
6435
6436         atomic_dec(&delayed_refs->num_entries);
6437
6438         /*
6439          * we don't take a ref on the node because we're removing it from the
6440          * tree, so we just steal the ref the tree was holding.
6441          */
6442         delayed_refs->num_heads--;
6443         if (head->processing == 0)
6444                 delayed_refs->num_heads_ready--;
6445         head->processing = 0;
6446         spin_unlock(&head->lock);
6447         spin_unlock(&delayed_refs->lock);
6448
6449         BUG_ON(head->extent_op);
6450         if (head->must_insert_reserved)
6451                 ret = 1;
6452
6453         mutex_unlock(&head->mutex);
6454         btrfs_put_delayed_ref(&head->node);
6455         return ret;
6456 out:
6457         spin_unlock(&head->lock);
6458
6459 out_delayed_unlock:
6460         spin_unlock(&delayed_refs->lock);
6461         return 0;
6462 }
6463
6464 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6465                            struct btrfs_root *root,
6466                            struct extent_buffer *buf,
6467                            u64 parent, int last_ref)
6468 {
6469         int pin = 1;
6470         int ret;
6471
6472         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6473                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6474                                         buf->start, buf->len,
6475                                         parent, root->root_key.objectid,
6476                                         btrfs_header_level(buf),
6477                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6478                 BUG_ON(ret); /* -ENOMEM */
6479         }
6480
6481         if (!last_ref)
6482                 return;
6483
6484         if (btrfs_header_generation(buf) == trans->transid) {
6485                 struct btrfs_block_group_cache *cache;
6486
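                /*
                 * The buffer was allocated in this transaction: if it was
                 * never written out we can return its space immediately
                 * instead of pinning it until the commit finishes.
                 */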
6487                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6488                         ret = check_ref_cleanup(trans, root, buf->start);
6489                         if (!ret)
6490                                 goto out;
6491                 }
6492
6493                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6494
6495                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6496                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6497                         btrfs_put_block_group(cache);
6498                         goto out;
6499                 }
6500
6501                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6502
6503                 btrfs_add_free_space(cache, buf->start, buf->len);
6504                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6505                 btrfs_put_block_group(cache);
6506                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6507                 pin = 0;
6508         }
6509 out:
6510         if (pin)
6511                 add_pinned_bytes(root->fs_info, buf->len,
6512                                  btrfs_header_level(buf),
6513                                  root->root_key.objectid);
6514
6515         /*
6516          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6517          * anymore.
6518          */
6519         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6520 }
6521
6522 /* Can return -ENOMEM */
6523 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6524                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6525                       u64 owner, u64 offset, int no_quota)
6526 {
6527         int ret;
6528         struct btrfs_fs_info *fs_info = root->fs_info;
6529
6530         if (btrfs_test_is_dummy_root(root))
6531                 return 0;
6532
6533         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6534
6535         /*
6536          * tree log blocks never actually go into the extent allocation
6537          * tree, just update pinning info and exit early.
6538          */
6539         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6540                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6541                 /* unlocks the pinned mutex */
6542                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6543                 ret = 0;
6544         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6545                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6546                                         num_bytes,
6547                                         parent, root_objectid, (int)owner,
6548                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6549         } else {
6550                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6551                                                 num_bytes,
6552                                                 parent, root_objectid, owner,
6553                                                 offset, BTRFS_DROP_DELAYED_REF,
6554                                                 NULL, no_quota);
6555         }
6556         return ret;
6557 }
6558
6559 /*
6560  * when we wait for progress in the block group caching, it's because
6561  * our allocation attempt failed at least once.  So, we must sleep
6562  * and let some progress happen before we try again.
6563  *
6564  * This function will sleep at least once waiting for new free space to
6565  * show up, and then it will check the block group free space numbers
6566  * for our min num_bytes.  Another option is to have it go ahead
6567  * and look in the rbtree for a free extent of a given size, but this
6568  * is a good start.
6569  *
6570  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6571  * any of the information in this block group.
6572  */
6573 static noinline void
6574 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6575                                 u64 num_bytes)
6576 {
6577         struct btrfs_caching_control *caching_ctl;
6578
6579         caching_ctl = get_caching_control(cache);
6580         if (!caching_ctl)
6581                 return;
6582
6583         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6584                    (cache->free_space_ctl->free_space >= num_bytes));
6585
6586         put_caching_control(caching_ctl);
6587 }
6588
6589 static noinline int
6590 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6591 {
6592         struct btrfs_caching_control *caching_ctl;
6593         int ret = 0;
6594
6595         caching_ctl = get_caching_control(cache);
6596         if (!caching_ctl)
6597                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6598
6599         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6600         if (cache->cached == BTRFS_CACHE_ERROR)
6601                 ret = -EIO;
6602         put_caching_control(caching_ctl);
6603         return ret;
6604 }
6605
6606 int __get_raid_index(u64 flags)
6607 {
6608         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6609                 return BTRFS_RAID_RAID10;
6610         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6611                 return BTRFS_RAID_RAID1;
6612         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6613                 return BTRFS_RAID_DUP;
6614         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6615                 return BTRFS_RAID_RAID0;
6616         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6617                 return BTRFS_RAID_RAID5;
6618         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6619                 return BTRFS_RAID_RAID6;
6620
6621         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6622 }
6623
6624 int get_block_group_index(struct btrfs_block_group_cache *cache)
6625 {
6626         return __get_raid_index(cache->flags);
6627 }
6628
6629 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6630         [BTRFS_RAID_RAID10]     = "raid10",
6631         [BTRFS_RAID_RAID1]      = "raid1",
6632         [BTRFS_RAID_DUP]        = "dup",
6633         [BTRFS_RAID_RAID0]      = "raid0",
6634         [BTRFS_RAID_SINGLE]     = "single",
6635         [BTRFS_RAID_RAID5]      = "raid5",
6636         [BTRFS_RAID_RAID6]      = "raid6",
6637 };
6638
6639 static const char *get_raid_name(enum btrfs_raid_types type)
6640 {
6641         if (type >= BTRFS_NR_RAID_TYPES)
6642                 return NULL;
6643
6644         return btrfs_raid_type_names[type];
6645 }
6646
6647 enum btrfs_loop_type {
6648         LOOP_CACHING_NOWAIT = 0,
6649         LOOP_CACHING_WAIT = 1,
6650         LOOP_ALLOC_CHUNK = 2,
6651         LOOP_NO_EMPTY_SIZE = 3,
6652 };
6653
6654 static inline void
6655 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6656                        int delalloc)
6657 {
6658         if (delalloc)
6659                 down_read(&cache->data_rwsem);
6660 }
6661
6662 static inline void
6663 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6664                        int delalloc)
6665 {
6666         btrfs_get_block_group(cache);
6667         if (delalloc)
6668                 down_read(&cache->data_rwsem);
6669 }
6670
6671 static struct btrfs_block_group_cache *
6672 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6673                    struct btrfs_free_cluster *cluster,
6674                    int delalloc)
6675 {
6676         struct btrfs_block_group_cache *used_bg;
6677         bool locked = false;
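        /*
         * If we have to drop the refill_lock to take data_rwsem, recheck
         * after retaking it that the cluster still points at the block
         * group we locked; if not, unlock it, drop the ref and retry.
         */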
6678 again:
6679         spin_lock(&cluster->refill_lock);
6680         if (locked) {
6681                 if (used_bg == cluster->block_group)
6682                         return used_bg;
6683
6684                 up_read(&used_bg->data_rwsem);
6685                 btrfs_put_block_group(used_bg);
6686         }
6687
6688         used_bg = cluster->block_group;
6689         if (!used_bg)
6690                 return NULL;
6691
6692         if (used_bg == block_group)
6693                 return used_bg;
6694
6695         btrfs_get_block_group(used_bg);
6696
6697         if (!delalloc)
6698                 return used_bg;
6699
6700         if (down_read_trylock(&used_bg->data_rwsem))
6701                 return used_bg;
6702
6703         spin_unlock(&cluster->refill_lock);
6704         down_read(&used_bg->data_rwsem);
6705         locked = true;
6706         goto again;
6707 }
6708
6709 static inline void
6710 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6711                          int delalloc)
6712 {
6713         if (delalloc)
6714                 up_read(&cache->data_rwsem);
6715         btrfs_put_block_group(cache);
6716 }
6717
6718 /*
6719  * walks the btree of allocated extents and finds a hole of a given size.
6720  * The key ins is changed to record the hole:
6721  * ins->objectid == start position
6722  * ins->type == BTRFS_EXTENT_ITEM_KEY
6723  * ins->offset == the size of the hole.
6724  * Any available blocks before search_start are skipped.
6725  *
6726  * If there is no suitable free space, we record the size of the largest
6727  * free space extent found in ins->offset instead.
6728  */
6729 static noinline int find_free_extent(struct btrfs_root *orig_root,
6730                                      u64 num_bytes, u64 empty_size,
6731                                      u64 hint_byte, struct btrfs_key *ins,
6732                                      u64 flags, int delalloc)
6733 {
6734         int ret = 0;
6735         struct btrfs_root *root = orig_root->fs_info->extent_root;
6736         struct btrfs_free_cluster *last_ptr = NULL;
6737         struct btrfs_block_group_cache *block_group = NULL;
6738         u64 search_start = 0;
6739         u64 max_extent_size = 0;
6740         int empty_cluster = 2 * 1024 * 1024;
6741         struct btrfs_space_info *space_info;
6742         int loop = 0;
6743         int index = __get_raid_index(flags);
6744         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6745                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6746         bool failed_cluster_refill = false;
6747         bool failed_alloc = false;
6748         bool use_cluster = true;
6749         bool have_caching_bg = false;
6750
6751         WARN_ON(num_bytes < root->sectorsize);
6752         ins->type = BTRFS_EXTENT_ITEM_KEY;
6753         ins->objectid = 0;
6754         ins->offset = 0;
6755
6756         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6757
6758         space_info = __find_space_info(root->fs_info, flags);
6759         if (!space_info) {
6760                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6761                 return -ENOSPC;
6762         }
6763
6764         /*
6765          * If the space info is for both data and metadata it means we have a
6766          * small filesystem and we can't use the clustering stuff.
6767          */
6768         if (btrfs_mixed_space_info(space_info))
6769                 use_cluster = false;
6770
6771         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6772                 last_ptr = &root->fs_info->meta_alloc_cluster;
6773                 if (!btrfs_test_opt(root, SSD))
6774                         empty_cluster = 64 * 1024;
6775         }
6776
6777         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6778             btrfs_test_opt(root, SSD)) {
6779                 last_ptr = &root->fs_info->data_alloc_cluster;
6780         }
6781
6782         if (last_ptr) {
6783                 spin_lock(&last_ptr->lock);
6784                 if (last_ptr->block_group)
6785                         hint_byte = last_ptr->window_start;
6786                 spin_unlock(&last_ptr->lock);
6787         }
6788
6789         search_start = max(search_start, first_logical_byte(root, 0));
6790         search_start = max(search_start, hint_byte);
6791
6792         if (!last_ptr)
6793                 empty_cluster = 0;
6794
6795         if (search_start == hint_byte) {
6796                 block_group = btrfs_lookup_block_group(root->fs_info,
6797                                                        search_start);
6798                 /*
6799                  * we don't want to use the block group if it doesn't match our
6800                  * allocation bits, or if it's not cached.
6801                  *
6802                  * However if we are re-searching with an ideal block group
6803                  * picked out then we don't care that the block group is cached.
6804                  */
6805                 if (block_group && block_group_bits(block_group, flags) &&
6806                     block_group->cached != BTRFS_CACHE_NO) {
6807                         down_read(&space_info->groups_sem);
6808                         if (list_empty(&block_group->list) ||
6809                             block_group->ro) {
6810                                 /*
6811                                  * someone is removing this block group,
6812                                  * we can't jump into the have_block_group
6813                                  * target because our list pointers are not
6814                                  * valid
6815                                  */
6816                                 btrfs_put_block_group(block_group);
6817                                 up_read(&space_info->groups_sem);
6818                         } else {
6819                                 index = get_block_group_index(block_group);
6820                                 btrfs_lock_block_group(block_group, delalloc);
6821                                 goto have_block_group;
6822                         }
6823                 } else if (block_group) {
6824                         btrfs_put_block_group(block_group);
6825                 }
6826         }
6827 search:
6828         have_caching_bg = false;
6829         down_read(&space_info->groups_sem);
6830         list_for_each_entry(block_group, &space_info->block_groups[index],
6831                             list) {
6832                 u64 offset;
6833                 int cached;
6834
6835                 btrfs_grab_block_group(block_group, delalloc);
6836                 search_start = block_group->key.objectid;
6837
6838                 /*
6839                  * this can happen if we end up cycling through all the
6840                  * raid types, but we want to make sure we only allocate
6841                  * for the proper type.
6842                  */
6843                 if (!block_group_bits(block_group, flags)) {
6844                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6845                                     BTRFS_BLOCK_GROUP_RAID1 |
6846                                     BTRFS_BLOCK_GROUP_RAID5 |
6847                                     BTRFS_BLOCK_GROUP_RAID6 |
6848                                     BTRFS_BLOCK_GROUP_RAID10;
6849
6850                         /*
6851                          * if they asked for extra copies and this block group
6852                          * doesn't provide them, bail.  This does allow us to
6853                          * fill raid0 from raid1.
6854                          */
6855                         if ((flags & extra) && !(block_group->flags & extra))
6856                                 goto loop;
6857                 }
6858
6859 have_block_group:
6860                 cached = block_group_cache_done(block_group);
6861                 if (unlikely(!cached)) {
6862                         ret = cache_block_group(block_group, 0);
6863                         BUG_ON(ret < 0);
6864                         ret = 0;
6865                 }
6866
6867                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6868                         goto loop;
6869                 if (unlikely(block_group->ro))
6870                         goto loop;
6871
6872                 /*
6873                  * OK, we want to try to use the cluster allocator, so
6874                  * let's look there
6875                  */
6876                 if (last_ptr) {
6877                         struct btrfs_block_group_cache *used_block_group;
6878                         unsigned long aligned_cluster;
6879                         /*
6880                          * the refill lock keeps out other
6881                          * people trying to start a new cluster
6882                          */
6883                         used_block_group = btrfs_lock_cluster(block_group,
6884                                                               last_ptr,
6885                                                               delalloc);
6886                         if (!used_block_group)
6887                                 goto refill_cluster;
6888
6889                         if (used_block_group != block_group &&
6890                             (used_block_group->ro ||
6891                              !block_group_bits(used_block_group, flags)))
6892                                 goto release_cluster;
6893
6894                         offset = btrfs_alloc_from_cluster(used_block_group,
6895                                                 last_ptr,
6896                                                 num_bytes,
6897                                                 used_block_group->key.objectid,
6898                                                 &max_extent_size);
6899                         if (offset) {
6900                                 /* we have a block, we're done */
6901                                 spin_unlock(&last_ptr->refill_lock);
6902                                 trace_btrfs_reserve_extent_cluster(root,
6903                                                 used_block_group,
6904                                                 search_start, num_bytes);
6905                                 if (used_block_group != block_group) {
6906                                         btrfs_release_block_group(block_group,
6907                                                                   delalloc);
6908                                         block_group = used_block_group;
6909                                 }
6910                                 goto checks;
6911                         }
6912
6913                         WARN_ON(last_ptr->block_group != used_block_group);
6914 release_cluster:
6915                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6916                          * set up a new cluster, so let's just skip it
6917                          * and let the allocator find whatever block
6918                          * it can find.  If we reach this point, we
6919                          * will have tried the cluster allocator
6920                          * plenty of times and not have found
6921                          * anything, so we are likely way too
6922                          * fragmented for the clustering stuff to find
6923                          * anything.
6924                          *
6925                          * However, if the cluster is taken from the
6926                          * current block group, release the cluster
6927                          * first, so that we stand a better chance of
6928                          * succeeding in the unclustered
6929                          * allocation.  */
6930                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6931                             used_block_group != block_group) {
6932                                 spin_unlock(&last_ptr->refill_lock);
6933                                 btrfs_release_block_group(used_block_group,
6934                                                           delalloc);
6935                                 goto unclustered_alloc;
6936                         }
6937
6938                         /*
6939                          * this cluster didn't work out, free it and
6940                          * start over
6941                          */
6942                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6943
6944                         if (used_block_group != block_group)
6945                                 btrfs_release_block_group(used_block_group,
6946                                                           delalloc);
6947 refill_cluster:
6948                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6949                                 spin_unlock(&last_ptr->refill_lock);
6950                                 goto unclustered_alloc;
6951                         }
6952
6953                         aligned_cluster = max_t(unsigned long,
6954                                                 empty_cluster + empty_size,
6955                                               block_group->full_stripe_len);
6956
6957                         /* allocate a cluster in this block group */
6958                         ret = btrfs_find_space_cluster(root, block_group,
6959                                                        last_ptr, search_start,
6960                                                        num_bytes,
6961                                                        aligned_cluster);
6962                         if (ret == 0) {
6963                                 /*
6964                                  * now pull our allocation out of this
6965                                  * cluster
6966                                  */
6967                                 offset = btrfs_alloc_from_cluster(block_group,
6968                                                         last_ptr,
6969                                                         num_bytes,
6970                                                         search_start,
6971                                                         &max_extent_size);
6972                                 if (offset) {
6973                                         /* we found one, proceed */
6974                                         spin_unlock(&last_ptr->refill_lock);
6975                                         trace_btrfs_reserve_extent_cluster(root,
6976                                                 block_group, search_start,
6977                                                 num_bytes);
6978                                         goto checks;
6979                                 }
6980                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6981                                    && !failed_cluster_refill) {
6982                                 spin_unlock(&last_ptr->refill_lock);
6983
6984                                 failed_cluster_refill = true;
6985                                 wait_block_group_cache_progress(block_group,
6986                                        num_bytes + empty_cluster + empty_size);
6987                                 goto have_block_group;
6988                         }
6989
6990                         /*
6991                          * at this point we either didn't find a cluster
6992                          * or we weren't able to allocate a block from our
6993                          * cluster.  Free the cluster we've been trying
6994                          * to use, and go to the next block group
6995                          */
6996                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6997                         spin_unlock(&last_ptr->refill_lock);
6998                         goto loop;
6999                 }
7000
7001 unclustered_alloc:
7002                 spin_lock(&block_group->free_space_ctl->tree_lock);
7003                 if (cached &&
7004                     block_group->free_space_ctl->free_space <
7005                     num_bytes + empty_cluster + empty_size) {
7006                         if (block_group->free_space_ctl->free_space >
7007                             max_extent_size)
7008                                 max_extent_size =
7009                                         block_group->free_space_ctl->free_space;
7010                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7011                         goto loop;
7012                 }
7013                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7014
7015                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7016                                                     num_bytes, empty_size,
7017                                                     &max_extent_size);
7018                 /*
7019                  * If we didn't find a chunk, and we haven't failed on this
7020                  * block group before, and this block group is in the middle of
7021                  * caching and we are ok with waiting, then go ahead and wait
7022                  * for progress to be made, and set failed_alloc to true.
7023                  *
7024                  * If failed_alloc is true then we've already waited on this
7025                  * block group once and should move on to the next block group.
7026                  */
7027                 if (!offset && !failed_alloc && !cached &&
7028                     loop > LOOP_CACHING_NOWAIT) {
7029                         wait_block_group_cache_progress(block_group,
7030                                                 num_bytes + empty_size);
7031                         failed_alloc = true;
7032                         goto have_block_group;
7033                 } else if (!offset) {
7034                         if (!cached)
7035                                 have_caching_bg = true;
7036                         goto loop;
7037                 }
7038 checks:
7039                 search_start = ALIGN(offset, root->stripesize);
7040
7041                 /* move on to the next group */
7042                 if (search_start + num_bytes >
7043                     block_group->key.objectid + block_group->key.offset) {
7044                         btrfs_add_free_space(block_group, offset, num_bytes);
7045                         goto loop;
7046                 }
7047
7048                 if (offset < search_start)
7049                         btrfs_add_free_space(block_group, offset,
7050                                              search_start - offset);
7051                 BUG_ON(offset > search_start);
7052
7053                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7054                                                   alloc_type, delalloc);
7055                 if (ret == -EAGAIN) {
7056                         btrfs_add_free_space(block_group, offset, num_bytes);
7057                         goto loop;
7058                 }
7059
7060                 /* we are all good, let's return */
7061                 ins->objectid = search_start;
7062                 ins->offset = num_bytes;
7063
7064                 trace_btrfs_reserve_extent(orig_root, block_group,
7065                                            search_start, num_bytes);
7066                 btrfs_release_block_group(block_group, delalloc);
7067                 break;
7068 loop:
7069                 failed_cluster_refill = false;
7070                 failed_alloc = false;
7071                 BUG_ON(index != get_block_group_index(block_group));
7072                 btrfs_release_block_group(block_group, delalloc);
7073         }
7074         up_read(&space_info->groups_sem);
7075
7076         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7077                 goto search;
7078
7079         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7080                 goto search;
7081
7082         /*
7083          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7084          *                      caching kthreads as we move along
7085          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7086          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7087          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7088          *                      again
7089          */
7090         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7091                 index = 0;
7092                 loop++;
7093                 if (loop == LOOP_ALLOC_CHUNK) {
7094                         struct btrfs_trans_handle *trans;
7095                         int exist = 0;
7096
7097                         trans = current->journal_info;
7098                         if (trans)
7099                                 exist = 1;
7100                         else
7101                                 trans = btrfs_join_transaction(root);
7102
7103                         if (IS_ERR(trans)) {
7104                                 ret = PTR_ERR(trans);
7105                                 goto out;
7106                         }
7107
7108                         ret = do_chunk_alloc(trans, root, flags,
7109                                              CHUNK_ALLOC_FORCE);
7110                         /*
7111                          * Do not bail out on ENOSPC since we
7112                          * may still be able to make progress.
7113                          */
7114                         if (ret < 0 && ret != -ENOSPC)
7115                                 btrfs_abort_transaction(trans,
7116                                                         root, ret);
7117                         else
7118                                 ret = 0;
7119                         if (!exist)
7120                                 btrfs_end_transaction(trans, root);
7121                         if (ret)
7122                                 goto out;
7123                 }
7124
7125                 if (loop == LOOP_NO_EMPTY_SIZE) {
7126                         empty_size = 0;
7127                         empty_cluster = 0;
7128                 }
7129
7130                 goto search;
7131         } else if (!ins->objectid) {
7132                 ret = -ENOSPC;
7133         } else {
7134                 ret = 0;
7135         }
7136 out:
7137         if (ret == -ENOSPC)
7138                 ins->offset = max_extent_size;
7139         return ret;
7140 }
7141
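/*
 * dump the usage counters of a space_info to the kernel log, and with
 * dump_block_groups set also the per-block-group numbers and free
 * space for every raid index.  Used for ENOSPC debugging.
 */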
7142 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7143                             int dump_block_groups)
7144 {
7145         struct btrfs_block_group_cache *cache;
7146         int index = 0;
7147
7148         spin_lock(&info->lock);
7149         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7150                info->flags,
7151                info->total_bytes - info->bytes_used - info->bytes_pinned -
7152                info->bytes_reserved - info->bytes_readonly,
7153                (info->full) ? "" : "not ");
7154         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7155                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7156                info->total_bytes, info->bytes_used, info->bytes_pinned,
7157                info->bytes_reserved, info->bytes_may_use,
7158                info->bytes_readonly);
7159         spin_unlock(&info->lock);
7160
7161         if (!dump_block_groups)
7162                 return;
7163
7164         down_read(&info->groups_sem);
7165 again:
7166         list_for_each_entry(cache, &info->block_groups[index], list) {
7167                 spin_lock(&cache->lock);
7168                 printk(KERN_INFO "BTRFS: "
7169                            "block group %llu has %llu bytes, "
7170                            "%llu used %llu pinned %llu reserved %s\n",
7171                        cache->key.objectid, cache->key.offset,
7172                        btrfs_block_group_used(&cache->item), cache->pinned,
7173                        cache->reserved, cache->ro ? "[readonly]" : "");
7174                 btrfs_dump_free_space(cache, bytes);
7175                 spin_unlock(&cache->lock);
7176         }
7177         if (++index < BTRFS_NR_RAID_TYPES)
7178                 goto again;
7179         up_read(&info->groups_sem);
7180 }
7181
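/*
 * main entry point for extent reservation.  On ENOSPC the search is
 * retried with a smaller size: num_bytes is halved (capped by the
 * max_extent_size reported back in ins->offset and rounded down to a
 * sector), never going below min_alloc_size; once min_alloc_size
 * itself has been tried, ENOSPC is returned to the caller.
 */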
7182 int btrfs_reserve_extent(struct btrfs_root *root,
7183                          u64 num_bytes, u64 min_alloc_size,
7184                          u64 empty_size, u64 hint_byte,
7185                          struct btrfs_key *ins, int is_data, int delalloc)
7186 {
7187         bool final_tried = false;
7188         u64 flags;
7189         int ret;
7190
7191         flags = btrfs_get_alloc_profile(root, is_data);
7192 again:
7193         WARN_ON(num_bytes < root->sectorsize);
7194         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7195                                flags, delalloc);
7196
7197         if (ret == -ENOSPC) {
7198                 if (!final_tried && ins->offset) {
7199                         num_bytes = min(num_bytes >> 1, ins->offset);
7200                         num_bytes = round_down(num_bytes, root->sectorsize);
7201                         num_bytes = max(num_bytes, min_alloc_size);
7202                         if (num_bytes == min_alloc_size)
7203                                 final_tried = true;
7204                         goto again;
7205                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7206                         struct btrfs_space_info *sinfo;
7207
7208                         sinfo = __find_space_info(root->fs_info, flags);
7209                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7210                                 flags, num_bytes);
7211                         if (sinfo)
7212                                 dump_space_info(sinfo, num_bytes, 1);
7213                 }
7214         }
7215
7216         return ret;
7217 }
7218
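/*
 * give back an extent that was reserved but ended up unused.  With pin
 * set the range is pinned down instead, so it only becomes reusable
 * once the current transaction commits; otherwise it is optionally
 * discarded and handed straight back to the free space cache.
 */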
7219 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7220                                         u64 start, u64 len,
7221                                         int pin, int delalloc)
7222 {
7223         struct btrfs_block_group_cache *cache;
7224         int ret = 0;
7225
7226         cache = btrfs_lookup_block_group(root->fs_info, start);
7227         if (!cache) {
7228                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7229                         start);
7230                 return -ENOSPC;
7231         }
7232
7233         if (pin)
7234                 pin_down_extent(root, cache, start, len, 1);
7235         else {
7236                 if (btrfs_test_opt(root, DISCARD))
7237                         ret = btrfs_discard_extent(root, start, len, NULL);
7238                 btrfs_add_free_space(cache, start, len);
7239                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7240         }
7241
7242         btrfs_put_block_group(cache);
7243
7244         trace_btrfs_reserved_extent_free(root, start, len);
7245
7246         return ret;
7247 }
7248
7249 int btrfs_free_reserved_extent(struct btrfs_root *root,
7250                                u64 start, u64 len, int delalloc)
7251 {
7252         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7253 }
7254
7255 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7256                                        u64 start, u64 len)
7257 {
7258         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7259 }
7260
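/*
 * insert the extent item for a newly allocated data extent along with
 * its first backref stored inline: a shared data ref keyed on the
 * parent block if parent is set, otherwise a regular extent data ref
 * keyed on root/owner/offset.
 */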
7261 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7262                                       struct btrfs_root *root,
7263                                       u64 parent, u64 root_objectid,
7264                                       u64 flags, u64 owner, u64 offset,
7265                                       struct btrfs_key *ins, int ref_mod)
7266 {
7267         int ret;
7268         struct btrfs_fs_info *fs_info = root->fs_info;
7269         struct btrfs_extent_item *extent_item;
7270         struct btrfs_extent_inline_ref *iref;
7271         struct btrfs_path *path;
7272         struct extent_buffer *leaf;
7273         int type;
7274         u32 size;
7275
7276         if (parent > 0)
7277                 type = BTRFS_SHARED_DATA_REF_KEY;
7278         else
7279                 type = BTRFS_EXTENT_DATA_REF_KEY;
7280
7281         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7282
7283         path = btrfs_alloc_path();
7284         if (!path)
7285                 return -ENOMEM;
7286
7287         path->leave_spinning = 1;
7288         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7289                                       ins, size);
7290         if (ret) {
7291                 btrfs_free_path(path);
7292                 return ret;
7293         }
7294
7295         leaf = path->nodes[0];
7296         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7297                                      struct btrfs_extent_item);
7298         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7299         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7300         btrfs_set_extent_flags(leaf, extent_item,
7301                                flags | BTRFS_EXTENT_FLAG_DATA);
7302
7303         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7304         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7305         if (parent > 0) {
7306                 struct btrfs_shared_data_ref *ref;
7307                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7308                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7309                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7310         } else {
7311                 struct btrfs_extent_data_ref *ref;
7312                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7313                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7314                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7315                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7316                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7317         }
7318
7319         btrfs_mark_buffer_dirty(path->nodes[0]);
7320         btrfs_free_path(path);
7321
7322         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7323         if (ret) { /* -ENOENT, logic error */
7324                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7325                         ins->objectid, ins->offset);
7326                 BUG();
7327         }
7328         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7329         return ret;
7330 }
7331
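/*
 * metadata counterpart of alloc_reserved_file_extent().  With the
 * skinny-metadata incompat feature no btrfs_tree_block_info is stored;
 * otherwise the block info (first key and level) sits between the
 * extent item and the inline backref.
 */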
7332 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7333                                      struct btrfs_root *root,
7334                                      u64 parent, u64 root_objectid,
7335                                      u64 flags, struct btrfs_disk_key *key,
7336                                      int level, struct btrfs_key *ins,
7337                                      int no_quota)
7338 {
7339         int ret;
7340         struct btrfs_fs_info *fs_info = root->fs_info;
7341         struct btrfs_extent_item *extent_item;
7342         struct btrfs_tree_block_info *block_info;
7343         struct btrfs_extent_inline_ref *iref;
7344         struct btrfs_path *path;
7345         struct extent_buffer *leaf;
7346         u32 size = sizeof(*extent_item) + sizeof(*iref);
7347         u64 num_bytes = ins->offset;
7348         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7349                                                  SKINNY_METADATA);
7350
7351         if (!skinny_metadata)
7352                 size += sizeof(*block_info);
7353
7354         path = btrfs_alloc_path();
7355         if (!path) {
7356                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7357                                                    root->nodesize);
7358                 return -ENOMEM;
7359         }
7360
7361         path->leave_spinning = 1;
7362         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7363                                       ins, size);
7364         if (ret) {
7365                 btrfs_free_path(path);
7366                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7367                                                    root->nodesize);
7368                 return ret;
7369         }
7370
7371         leaf = path->nodes[0];
7372         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7373                                      struct btrfs_extent_item);
7374         btrfs_set_extent_refs(leaf, extent_item, 1);
7375         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7376         btrfs_set_extent_flags(leaf, extent_item,
7377                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7378
7379         if (skinny_metadata) {
7380                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7381                 num_bytes = root->nodesize;
7382         } else {
7383                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7384                 btrfs_set_tree_block_key(leaf, block_info, key);
7385                 btrfs_set_tree_block_level(leaf, block_info, level);
7386                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7387         }
7388
7389         if (parent > 0) {
7390                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7391                 btrfs_set_extent_inline_ref_type(leaf, iref,
7392                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7393                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7394         } else {
7395                 btrfs_set_extent_inline_ref_type(leaf, iref,
7396                                                  BTRFS_TREE_BLOCK_REF_KEY);
7397                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7398         }
7399
7400         btrfs_mark_buffer_dirty(leaf);
7401         btrfs_free_path(path);
7402
7403         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7404                                  1);
7405         if (ret) { /* -ENOENT, logic error */
7406                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7407                         ins->objectid, ins->offset);
7408                 BUG();
7409         }
7410
7411         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7412         return ret;
7413 }
7414
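/*
 * record a freshly allocated data extent.  Rather than updating the
 * extent tree directly, this queues a delayed ref that will insert the
 * extent item and backref when the delayed refs are run.
 */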
7415 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7416                                      struct btrfs_root *root,
7417                                      u64 root_objectid, u64 owner,
7418                                      u64 offset, struct btrfs_key *ins)
7419 {
7420         int ret;
7421
7422         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7423
7424         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7425                                          ins->offset, 0,
7426                                          root_objectid, owner, offset,
7427                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7428         return ret;
7429 }
7430
7431 /*
7432  * this is used by the tree logging recovery code.  It records that
7433  * an extent has been allocated and makes sure to clear the free
7434  * space cache bits as well
7435  */
7436 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7437                                    struct btrfs_root *root,
7438                                    u64 root_objectid, u64 owner, u64 offset,
7439                                    struct btrfs_key *ins)
7440 {
7441         int ret;
7442         struct btrfs_block_group_cache *block_group;
7443
7444         /*
7445          * Mixed block groups will exclude before processing the log so we only
7446          * need to do the exclude dance if this fs isn't mixed.
7447          */
7448         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7449                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7450                 if (ret)
7451                         return ret;
7452         }
7453
7454         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7455         if (!block_group)
7456                 return -EINVAL;
7457
7458         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7459                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7460         BUG_ON(ret); /* logic error */
7461         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7462                                          0, owner, offset, ins, 1);
7463         btrfs_put_block_group(block_group);
7464         return ret;
7465 }
7466
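/*
 * set up the extent buffer for a newly allocated tree block: stamp the
 * transaction id, take the tree lock and mark the range dirty in the
 * appropriate io tree (dirty_log_pages for log trees, the
 * transaction's dirty_pages otherwise).
 */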
7467 static struct extent_buffer *
7468 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7469                       u64 bytenr, int level)
7470 {
7471         struct extent_buffer *buf;
7472
7473         buf = btrfs_find_create_tree_block(root, bytenr);
7474         if (!buf)
7475                 return ERR_PTR(-ENOMEM);
7476         btrfs_set_header_generation(buf, trans->transid);
7477         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7478         btrfs_tree_lock(buf);
7479         clean_tree_block(trans, root->fs_info, buf);
7480         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7481
7482         btrfs_set_lock_blocking(buf);
7483         btrfs_set_buffer_uptodate(buf);
7484
7485         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7486                 buf->log_index = root->log_transid % 2;
7487                 /*
7488                  * we allow two log transactions at a time, use different
7489                  * EXTENT bits to differentiate dirty pages.
7490                  */
7491                 if (buf->log_index == 0)
7492                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7493                                         buf->start + buf->len - 1, GFP_NOFS);
7494                 else
7495                         set_extent_new(&root->dirty_log_pages, buf->start,
7496                                         buf->start + buf->len - 1, GFP_NOFS);
7497         } else {
7498                 buf->log_index = -1;
7499                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7500                          buf->start + buf->len - 1, GFP_NOFS);
7501         }
7502         trans->blocks_used++;
7503         /* this returns a buffer locked for blocking */
7504         return buf;
7505 }
7506
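/*
 * pick the block reserve backing a new tree block and consume
 * blocksize bytes from it.  If that fails we refresh the global
 * reserve once and retry, then fall back to a direct NO_FLUSH
 * reservation, and finally try stealing from the global reserve when
 * it shares our space_info.
 */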
7507 static struct btrfs_block_rsv *
7508 use_block_rsv(struct btrfs_trans_handle *trans,
7509               struct btrfs_root *root, u32 blocksize)
7510 {
7511         struct btrfs_block_rsv *block_rsv;
7512         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7513         int ret;
7514         bool global_updated = false;
7515
7516         block_rsv = get_block_rsv(trans, root);
7517
7518         if (unlikely(block_rsv->size == 0))
7519                 goto try_reserve;
7520 again:
7521         ret = block_rsv_use_bytes(block_rsv, blocksize);
7522         if (!ret)
7523                 return block_rsv;
7524
7525         if (block_rsv->failfast)
7526                 return ERR_PTR(ret);
7527
7528         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7529                 global_updated = true;
7530                 update_global_block_rsv(root->fs_info);
7531                 goto again;
7532         }
7533
7534         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7535                 static DEFINE_RATELIMIT_STATE(_rs,
7536                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7537                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7538                 if (__ratelimit(&_rs))
7539                         WARN(1, KERN_DEBUG
7540                                 "BTRFS: block rsv returned %d\n", ret);
7541         }
7542 try_reserve:
7543         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7544                                      BTRFS_RESERVE_NO_FLUSH);
7545         if (!ret)
7546                 return block_rsv;
7547         /*
7548          * If we couldn't reserve metadata bytes, try to use some from
7549          * the global reserve, as long as our block reserve shares the
7550          * global reserve's space_info.
7551          */
7552         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7553             block_rsv->space_info == global_rsv->space_info) {
7554                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7555                 if (!ret)
7556                         return global_rsv;
7557         }
7558         return ERR_PTR(ret);
7559 }
7560
7561 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7562                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7563 {
7564         block_rsv_add_bytes(block_rsv, blocksize, 0);
7565         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7566 }
7567
7568 /*
7569  * finds a free extent and does all the dirty work required for allocation.
7570  * returns the key for the extent through ins, and the locked tree buffer
7571  * for the first block of the extent.
7572  *
7573  * returns the tree buffer or an ERR_PTR on error.
7574  */
7575 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7576                                         struct btrfs_root *root,
7577                                         u64 parent, u64 root_objectid,
7578                                         struct btrfs_disk_key *key, int level,
7579                                         u64 hint, u64 empty_size)
7580 {
7581         struct btrfs_key ins;
7582         struct btrfs_block_rsv *block_rsv;
7583         struct extent_buffer *buf;
7584         struct btrfs_delayed_extent_op *extent_op;
7585         u64 flags = 0;
7586         int ret;
7587         u32 blocksize = root->nodesize;
7588         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7589                                                  SKINNY_METADATA);
7590
7591         if (btrfs_test_is_dummy_root(root)) {
7592                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7593                                             level);
7594                 if (!IS_ERR(buf))
7595                         root->alloc_bytenr += blocksize;
7596                 return buf;
7597         }
7598
7599         block_rsv = use_block_rsv(trans, root, blocksize);
7600         if (IS_ERR(block_rsv))
7601                 return ERR_CAST(block_rsv);
7602
7603         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7604                                    empty_size, hint, &ins, 0, 0);
7605         if (ret)
7606                 goto out_unuse;
7607
7608         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7609         if (IS_ERR(buf)) {
7610                 ret = PTR_ERR(buf);
7611                 goto out_free_reserved;
7612         }
7613
7614         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7615                 if (parent == 0)
7616                         parent = ins.objectid;
7617                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7618         } else
7619                 BUG_ON(parent > 0);
7620
7621         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7622                 extent_op = btrfs_alloc_delayed_extent_op();
7623                 if (!extent_op) {
7624                         ret = -ENOMEM;
7625                         goto out_free_buf;
7626                 }
7627                 if (key)
7628                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7629                 else
7630                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7631                 extent_op->flags_to_set = flags;
7632                 if (skinny_metadata)
7633                         extent_op->update_key = 0;
7634                 else
7635                         extent_op->update_key = 1;
7636                 extent_op->update_flags = 1;
7637                 extent_op->is_data = 0;
7638                 extent_op->level = level;
7639
7640                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7641                                                  ins.objectid, ins.offset,
7642                                                  parent, root_objectid, level,
7643                                                  BTRFS_ADD_DELAYED_EXTENT,
7644                                                  extent_op, 0);
7645                 if (ret)
7646                         goto out_free_delayed;
7647         }
7648         return buf;
7649
7650 out_free_delayed:
7651         btrfs_free_delayed_extent_op(extent_op);
7652 out_free_buf:
7653         free_extent_buffer(buf);
7654 out_free_reserved:
7655         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7656 out_unuse:
7657         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7658         return ERR_PTR(ret);
7659 }
7660
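/*
 * state carried through the tree walk when dropping a snapshot or
 * subtree: cached ref counts and flags per level, the current stage
 * (DROP_REFERENCE or UPDATE_BACKREF) and the readahead window used by
 * reada_walk_down().
 */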
7661 struct walk_control {
7662         u64 refs[BTRFS_MAX_LEVEL];
7663         u64 flags[BTRFS_MAX_LEVEL];
7664         struct btrfs_key update_progress;
7665         int stage;
7666         int level;
7667         int shared_level;
7668         int update_ref;
7669         int keep_locks;
7670         int reada_slot;
7671         int reada_count;
7672         int for_reloc;
7673 };
7674
7675 #define DROP_REFERENCE  1
7676 #define UPDATE_BACKREF  2
7677
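/*
 * start readahead on the yet-to-be-visited children of the current
 * node.  The window (wc->reada_count) shrinks when the walk restarts
 * below the last readahead position and grows while it makes forward
 * progress; children the walk would skip anyway are not read ahead.
 */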
7678 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7679                                      struct btrfs_root *root,
7680                                      struct walk_control *wc,
7681                                      struct btrfs_path *path)
7682 {
7683         u64 bytenr;
7684         u64 generation;
7685         u64 refs;
7686         u64 flags;
7687         u32 nritems;
7688         u32 blocksize;
7689         struct btrfs_key key;
7690         struct extent_buffer *eb;
7691         int ret;
7692         int slot;
7693         int nread = 0;
7694
7695         if (path->slots[wc->level] < wc->reada_slot) {
7696                 wc->reada_count = wc->reada_count * 2 / 3;
7697                 wc->reada_count = max(wc->reada_count, 2);
7698         } else {
7699                 wc->reada_count = wc->reada_count * 3 / 2;
7700                 wc->reada_count = min_t(int, wc->reada_count,
7701                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7702         }
7703
7704         eb = path->nodes[wc->level];
7705         nritems = btrfs_header_nritems(eb);
7706         blocksize = root->nodesize;
7707
7708         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7709                 if (nread >= wc->reada_count)
7710                         break;
7711
7712                 cond_resched();
7713                 bytenr = btrfs_node_blockptr(eb, slot);
7714                 generation = btrfs_node_ptr_generation(eb, slot);
7715
7716                 if (slot == path->slots[wc->level])
7717                         goto reada;
7718
7719                 if (wc->stage == UPDATE_BACKREF &&
7720                     generation <= root->root_key.offset)
7721                         continue;
7722
7723                 /* We don't lock the tree block, it's OK to be racy here */
7724                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7725                                                wc->level - 1, 1, &refs,
7726                                                &flags);
7727                 /* We don't care about errors in readahead. */
7728                 if (ret < 0)
7729                         continue;
7730                 BUG_ON(refs == 0);
7731
7732                 if (wc->stage == DROP_REFERENCE) {
7733                         if (refs == 1)
7734                                 goto reada;
7735
7736                         if (wc->level == 1 &&
7737                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7738                                 continue;
7739                         if (!wc->update_ref ||
7740                             generation <= root->root_key.offset)
7741                                 continue;
7742                         btrfs_node_key_to_cpu(eb, &key, slot);
7743                         ret = btrfs_comp_cpu_keys(&key,
7744                                                   &wc->update_progress);
7745                         if (ret < 0)
7746                                 continue;
7747                 } else {
7748                         if (wc->level == 1 &&
7749                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7750                                 continue;
7751                 }
7752 reada:
7753                 readahead_tree_block(root, bytenr);
7754                 nread++;
7755         }
7756         wc->reada_slot = slot;
7757 }
7758
7759 /*
7760  * TODO: Modify the related functions to add the relevant nodes/leaves to
7761  * dirty_extent_root for later qgroup accounting.
7762  *
7763  * Currently, this function does nothing.
7764  */
7765 static int account_leaf_items(struct btrfs_trans_handle *trans,
7766                               struct btrfs_root *root,
7767                               struct extent_buffer *eb)
7768 {
7769         int nr = btrfs_header_nritems(eb);
7770         int i, extent_type;
7771         struct btrfs_key key;
7772         struct btrfs_file_extent_item *fi;
7773         u64 bytenr, num_bytes;
7774
7775         for (i = 0; i < nr; i++) {
7776                 btrfs_item_key_to_cpu(eb, &key, i);
7777
7778                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7779                         continue;
7780
7781                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7782                 /* filter out non-qgroup-accountable extents */
7783                 extent_type = btrfs_file_extent_type(eb, fi);
7784
7785                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7786                         continue;
7787
7788                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7789                 if (!bytenr)
7790                         continue;
7791
7792                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7793         }
7794         return 0;
7795 }
7796
7797 /*
7798  * Walk up the tree from the bottom, freeing leaves and any interior
7799  * nodes which have had all slots visited. If a node (leaf or
7800  * interior) is freed, the node above it will have its slot
7801  * incremented. The root node will never be freed.
7802  *
7803  * At the end of this function, we should have a path which has all
7804  * slots incremented to the next position for a search. If we need to
7805  * read a new node it will be NULL and the node above it will have the
7806  * correct slot selected for a later read.
7807  *
7808  * If we increment the root nodes slot counter past the number of
7809  * elements, 1 is returned to signal completion of the search.
7810  */
7811 static int adjust_slots_upwards(struct btrfs_root *root,
7812                                 struct btrfs_path *path, int root_level)
7813 {
7814         int level = 0;
7815         int nr, slot;
7816         struct extent_buffer *eb;
7817
7818         if (root_level == 0)
7819                 return 1;
7820
7821         while (level <= root_level) {
7822                 eb = path->nodes[level];
7823                 nr = btrfs_header_nritems(eb);
7824                 path->slots[level]++;
7825                 slot = path->slots[level];
7826                 if (slot >= nr || level == 0) {
7827                         /*
7828                          * Don't free the root - we will detect this
7829                          * condition after our loop and return a
7830                          * positive value for the caller to stop walking the tree.
7831                          */
7832                         if (level != root_level) {
7833                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7834                                 path->locks[level] = 0;
7835
7836                                 free_extent_buffer(eb);
7837                                 path->nodes[level] = NULL;
7838                                 path->slots[level] = 0;
7839                         }
7840                 } else {
7841                         /*
7842                          * We have a valid slot to walk back down
7843                          * from. Stop here so caller can process these
7844                          * new nodes.
7845                          */
7846                         break;
7847                 }
7848
7849                 level++;
7850         }
7851
7852         eb = path->nodes[root_level];
7853         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7854                 return 1;
7855
7856         return 0;
7857 }
7858
7859 /*
7860  * root_eb is the subtree root and is locked before this function is called.
7861  * TODO: Modify this function to mark all (including completely shared)
7862  * nodes to dirty_extent_root so they can be accounted in qgroup.
7863  */
7864 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7865                                   struct btrfs_root *root,
7866                                   struct extent_buffer *root_eb,
7867                                   u64 root_gen,
7868                                   int root_level)
7869 {
7870         int ret = 0;
7871         int level;
7872         struct extent_buffer *eb = root_eb;
7873         struct btrfs_path *path = NULL;
7874
7875         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7876         BUG_ON(root_eb == NULL);
7877
7878         if (!root->fs_info->quota_enabled)
7879                 return 0;
7880
7881         if (!extent_buffer_uptodate(root_eb)) {
7882                 ret = btrfs_read_buffer(root_eb, root_gen);
7883                 if (ret)
7884                         goto out;
7885         }
7886
7887         if (root_level == 0) {
7888                 ret = account_leaf_items(trans, root, root_eb);
7889                 goto out;
7890         }
7891
7892         path = btrfs_alloc_path();
7893         if (!path)
7894                 return -ENOMEM;
7895
7896         /*
7897          * Walk down the tree.  Missing extent blocks are filled in as
7898          * we go. Metadata is accounted every time we read a new
7899          * extent block.
7900          *
7901          * When we reach a leaf, we account for file extent items in it,
7902          * walk back up the tree (adjusting slot pointers as we go)
7903          * and restart the search process.
7904          */
7905         extent_buffer_get(root_eb); /* For path */
7906         path->nodes[root_level] = root_eb;
7907         path->slots[root_level] = 0;
7908         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7909 walk_down:
7910         level = root_level;
7911         while (level >= 0) {
7912                 if (path->nodes[level] == NULL) {
7913                         int parent_slot;
7914                         u64 child_gen;
7915                         u64 child_bytenr;
7916
7917                         /* We need to get child blockptr/gen from
7918                          * parent before we can read it. */
7919                         eb = path->nodes[level + 1];
7920                         parent_slot = path->slots[level + 1];
7921                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7922                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7923
7924                         eb = read_tree_block(root, child_bytenr, child_gen);
7925                         if (IS_ERR(eb)) {
7926                                 ret = PTR_ERR(eb);
7927                                 goto out;
7928                         } else if (!extent_buffer_uptodate(eb)) {
7929                                 free_extent_buffer(eb);
7930                                 ret = -EIO;
7931                                 goto out;
7932                         }
7933
7934                         path->nodes[level] = eb;
7935                         path->slots[level] = 0;
7936
7937                         btrfs_tree_read_lock(eb);
7938                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7939                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7940                 }
7941
7942                 if (level == 0) {
7943                         ret = account_leaf_items(trans, root, path->nodes[level]);
7944                         if (ret)
7945                                 goto out;
7946
7947                         /* Nonzero return here means we completed our search */
7948                         ret = adjust_slots_upwards(root, path, root_level);
7949                         if (ret)
7950                                 break;
7951
7952                         /* Restart search with new slots */
7953                         goto walk_down;
7954                 }
7955
7956                 level--;
7957         }
7958
7959         ret = 0;
7960 out:
7961         btrfs_free_path(path);
7962
7963         return ret;
7964 }
7965
7966 /*
7967  * helper to process tree block while walking down the tree.
7968  *
7969  * when wc->stage == UPDATE_BACKREF, this function updates
7970  * back refs for pointers in the block.
7971  *
7972  * NOTE: return value 1 means we should stop walking down.
7973  */
7974 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7975                                    struct btrfs_root *root,
7976                                    struct btrfs_path *path,
7977                                    struct walk_control *wc, int lookup_info)
7978 {
7979         int level = wc->level;
7980         struct extent_buffer *eb = path->nodes[level];
7981         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7982         int ret;
7983
7984         if (wc->stage == UPDATE_BACKREF &&
7985             btrfs_header_owner(eb) != root->root_key.objectid)
7986                 return 1;
7987
7988         /*
7989          * when the reference count of a tree block is 1, it won't increase
7990          * again. once the full backref flag is set, we never clear it.
7991          */
7992         if (lookup_info &&
7993             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7994              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7995                 BUG_ON(!path->locks[level]);
7996                 ret = btrfs_lookup_extent_info(trans, root,
7997                                                eb->start, level, 1,
7998                                                &wc->refs[level],
7999                                                &wc->flags[level]);
8000                 BUG_ON(ret == -ENOMEM);
8001                 if (ret)
8002                         return ret;
8003                 BUG_ON(wc->refs[level] == 0);
8004         }
8005
8006         if (wc->stage == DROP_REFERENCE) {
8007                 if (wc->refs[level] > 1)
8008                         return 1;
8009
8010                 if (path->locks[level] && !wc->keep_locks) {
8011                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8012                         path->locks[level] = 0;
8013                 }
8014                 return 0;
8015         }
8016
8017         /* wc->stage == UPDATE_BACKREF */
8018         if (!(wc->flags[level] & flag)) {
8019                 BUG_ON(!path->locks[level]);
8020                 ret = btrfs_inc_ref(trans, root, eb, 1);
8021                 BUG_ON(ret); /* -ENOMEM */
8022                 ret = btrfs_dec_ref(trans, root, eb, 0);
8023                 BUG_ON(ret); /* -ENOMEM */
8024                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8025                                                   eb->len, flag,
8026                                                   btrfs_header_level(eb), 0);
8027                 BUG_ON(ret); /* -ENOMEM */
8028                 wc->flags[level] |= flag;
8029         }
8030
8031         /*
8032          * the block is shared by multiple trees, so it's not good to
8033          * keep the tree lock
8034          */
8035         if (path->locks[level] && level > 0) {
8036                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8037                 path->locks[level] = 0;
8038         }
8039         return 0;
8040 }
8041
8042 /*
8043  * helper to process tree block pointer.
8044  *
8045  * when wc->stage == DROP_REFERENCE, this function checks
8046  * the reference count of the block pointed to. if the block
8047  * is shared and we need to update back refs for the subtree
8048  * rooted at the block, this function changes wc->stage to
8049  * UPDATE_BACKREF. if the block is shared and there is no
8050  * need to update backrefs, this function drops the reference
8051  * to the block.
8052  *
8053  * NOTE: return value 1 means we should stop walking down.
8054  */
8055 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8056                                  struct btrfs_root *root,
8057                                  struct btrfs_path *path,
8058                                  struct walk_control *wc, int *lookup_info)
8059 {
8060         u64 bytenr;
8061         u64 generation;
8062         u64 parent;
8063         u32 blocksize;
8064         struct btrfs_key key;
8065         struct extent_buffer *next;
8066         int level = wc->level;
8067         int reada = 0;
8068         int ret = 0;
8069         bool need_account = false;
8070
8071         generation = btrfs_node_ptr_generation(path->nodes[level],
8072                                                path->slots[level]);
8073         /*
8074          * if the lower level block was created before the snapshot
8075          * was created, we know there is no need to update back refs
8076          * for the subtree
8077          */
8078         if (wc->stage == UPDATE_BACKREF &&
8079             generation <= root->root_key.offset) {
8080                 *lookup_info = 1;
8081                 return 1;
8082         }
8083
8084         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8085         blocksize = root->nodesize;
8086
8087         next = btrfs_find_tree_block(root->fs_info, bytenr);
8088         if (!next) {
8089                 next = btrfs_find_create_tree_block(root, bytenr);
8090                 if (!next)
8091                         return -ENOMEM;
8092                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8093                                                level - 1);
8094                 reada = 1;
8095         }
8096         btrfs_tree_lock(next);
8097         btrfs_set_lock_blocking(next);
8098
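        /*
         * Look up the child's reference count and backref flags; these
         * decide below whether we descend into the subtree or skip it.
         */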
8099         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8100                                        &wc->refs[level - 1],
8101                                        &wc->flags[level - 1]);
8102         if (ret < 0) {
8103                 btrfs_tree_unlock(next);
                     free_extent_buffer(next); /* drop the ref taken when we found the buffer */
8104                 return ret;
8105         }
8106
8107         if (unlikely(wc->refs[level - 1] == 0)) {
8108                 btrfs_err(root->fs_info, "Missing references.");
8109                 BUG();
8110         }
8111         *lookup_info = 0;
8112
8113         if (wc->stage == DROP_REFERENCE) {
8114                 if (wc->refs[level - 1] > 1) {
8115                         need_account = true;
8116                         if (level == 1 &&
8117                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8118                                 goto skip;
8119
8120                         if (!wc->update_ref ||
8121                             generation <= root->root_key.offset)
8122                                 goto skip;
8123
8124                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8125                                               path->slots[level]);
8126                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8127                         if (ret < 0)
8128                                 goto skip;
8129
8130                         wc->stage = UPDATE_BACKREF;
8131                         wc->shared_level = level - 1;
8132                 }
8133         } else {
8134                 if (level == 1 &&
8135                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8136                         goto skip;
8137         }
8138
8139         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8140                 btrfs_tree_unlock(next);
8141                 free_extent_buffer(next);
8142                 next = NULL;
8143                 *lookup_info = 1;
8144         }
8145
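        /*
         * The cached buffer was stale and dropped above; read the block
         * from disk, after kicking off readahead if we're at the lowest
         * node level.
         */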
8146         if (!next) {
8147                 if (reada && level == 1)
8148                         reada_walk_down(trans, root, wc, path);
8149                 next = read_tree_block(root, bytenr, generation);
8150                 if (IS_ERR(next)) {
8151                         return PTR_ERR(next);
8152                 } else if (!extent_buffer_uptodate(next)) {
8153                         free_extent_buffer(next);
8154                         return -EIO;
8155                 }
8156                 btrfs_tree_lock(next);
8157                 btrfs_set_lock_blocking(next);
8158         }
8159
8160         level--;
8161         BUG_ON(level != btrfs_header_level(next));
8162         path->nodes[level] = next;
8163         path->slots[level] = 0;
8164         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8165         wc->level = level;
8166         if (wc->level == 1)
8167                 wc->reada_slot = 0;
8168         return 0;
8169 skip:
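        /*
         * We are not descending into this subtree.  Drop our reference
         * on the child here and, if it was shared, account it for
         * qgroups before freeing.
         */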
8170         wc->refs[level - 1] = 0;
8171         wc->flags[level - 1] = 0;
8172         if (wc->stage == DROP_REFERENCE) {
8173                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8174                         parent = path->nodes[level]->start;
8175                 } else {
8176                         BUG_ON(root->root_key.objectid !=
8177                                btrfs_header_owner(path->nodes[level]));
8178                         parent = 0;
8179                 }
8180
8181                 if (need_account) {
8182                         ret = account_shared_subtree(trans, root, next,
8183                                                      generation, level - 1);
8184                         if (ret) {
8185                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8186                                         "%d accounting shared subtree. Quota "
8187                                         "is out of sync, rescan required.\n",
8188                                         root->fs_info->sb->s_id, ret);
8189                         }
8190                 }
8191                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8192                                 root->root_key.objectid, level - 1, 0, 0);
8193                 BUG_ON(ret); /* -ENOMEM */
8194         }
8195         btrfs_tree_unlock(next);
8196         free_extent_buffer(next);
8197         *lookup_info = 1;
8198         return 1;
8199 }
8200
8201 /*
8202  * helper to process tree block while walking up the tree.
8203  *
8204  * when wc->stage == DROP_REFERENCE, this function drops
8205  * the reference count on the block.
8206  *
8207  * when wc->stage == UPDATE_BACKREF, this function changes
8208  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8209  * to UPDATE_BACKREF previously while processing the block.
8210  *
8211  * NOTE: return value 1 means we should stop walking up.
8212  */
8213 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8214                                  struct btrfs_root *root,
8215                                  struct btrfs_path *path,
8216                                  struct walk_control *wc)
8217 {
8218         int ret;
8219         int level = wc->level;
8220         struct extent_buffer *eb = path->nodes[level];
8221         u64 parent = 0;
8222
8223         if (wc->stage == UPDATE_BACKREF) {
8224                 BUG_ON(wc->shared_level < level);
8225                 if (level < wc->shared_level)
8226                         goto out;
8227
8228                 ret = find_next_key(path, level + 1, &wc->update_progress);
8229                 if (ret > 0)
8230                         wc->update_ref = 0;
8231
8232                 wc->stage = DROP_REFERENCE;
8233                 wc->shared_level = -1;
8234                 path->slots[level] = 0;
8235
8236                 /*
8237                  * check reference count again if the block isn't locked.
8238                  * we should start walking down the tree again if reference
8239                  * count is one.
8240                  */
8241                 if (!path->locks[level]) {
8242                         BUG_ON(level == 0);
8243                         btrfs_tree_lock(eb);
8244                         btrfs_set_lock_blocking(eb);
8245                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8246
8247                         ret = btrfs_lookup_extent_info(trans, root,
8248                                                        eb->start, level, 1,
8249                                                        &wc->refs[level],
8250                                                        &wc->flags[level]);
8251                         if (ret < 0) {
8252                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8253                                 path->locks[level] = 0;
8254                                 return ret;
8255                         }
8256                         BUG_ON(wc->refs[level] == 0);
8257                         if (wc->refs[level] == 1) {
8258                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8259                                 path->locks[level] = 0;
8260                                 return 1;
8261                         }
8262                 }
8263         }
8264
8265         /* wc->stage == DROP_REFERENCE */
8266         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8267
8268         if (wc->refs[level] == 1) {
8269                 if (level == 0) {
8270                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8271                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8272                         else
8273                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8274                         BUG_ON(ret); /* -ENOMEM */
8275                         ret = account_leaf_items(trans, root, eb);
8276                         if (ret) {
8277                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8278                                         "%d accounting leaf items. Quota "
8279                                         "is out of sync, rescan required.\n",
8280                                         root->fs_info->sb->s_id, ret);
8281                         }
8282                 }
8283         /* make the "block is locked" assertion in clean_tree_block happy */
8284                 if (!path->locks[level] &&
8285                     btrfs_header_generation(eb) == trans->transid) {
8286                         btrfs_tree_lock(eb);
8287                         btrfs_set_lock_blocking(eb);
8288                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8289                 }
8290                 clean_tree_block(trans, root->fs_info, eb);
8291         }
8292
8293         if (eb == root->node) {
8294                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8295                         parent = eb->start;
8296                 else
8297                         BUG_ON(root->root_key.objectid !=
8298                                btrfs_header_owner(eb));
8299         } else {
8300                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8301                         parent = path->nodes[level + 1]->start;
8302                 else
8303                         BUG_ON(root->root_key.objectid !=
8304                                btrfs_header_owner(path->nodes[level + 1]));
8305         }
8306
8307         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8308 out:
8309         wc->refs[level] = 0;
8310         wc->flags[level] = 0;
8311         return 0;
8312 }
8313
8314 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8315                                    struct btrfs_root *root,
8316                                    struct btrfs_path *path,
8317                                    struct walk_control *wc)
8318 {
8319         int level = wc->level;
8320         int lookup_info = 1;
8321         int ret;
8322
8323         while (level >= 0) {
8324                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8325                 if (ret > 0)
8326                         break;
8327
8328                 if (level == 0)
8329                         break;
8330
8331                 if (path->slots[level] >=
8332                     btrfs_header_nritems(path->nodes[level]))
8333                         break;
8334
8335                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8336                 if (ret > 0) {
8337                         path->slots[level]++;
8338                         continue;
8339                 } else if (ret < 0)
8340                         return ret;
8341                 level = wc->level;
8342         }
8343         return 0;
8344 }
8345
8346 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8347                                  struct btrfs_root *root,
8348                                  struct btrfs_path *path,
8349                                  struct walk_control *wc, int max_level)
8350 {
8351         int level = wc->level;
8352         int ret;
8353
8354         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8355         while (level < max_level && path->nodes[level]) {
8356                 wc->level = level;
8357                 if (path->slots[level] + 1 <
8358                     btrfs_header_nritems(path->nodes[level])) {
8359                         path->slots[level]++;
8360                         return 0;
8361                 } else {
8362                         ret = walk_up_proc(trans, root, path, wc);
8363                         if (ret > 0)
8364                                 return 0;
8365
8366                         if (path->locks[level]) {
8367                                 btrfs_tree_unlock_rw(path->nodes[level],
8368                                                      path->locks[level]);
8369                                 path->locks[level] = 0;
8370                         }
8371                         free_extent_buffer(path->nodes[level]);
8372                         path->nodes[level] = NULL;
8373                         level++;
8374                 }
8375         }
8376         return 1;
8377 }
8378
8379 /*
8380  * drop a subvolume tree.
8381  *
8382  * this function traverses the tree, freeing any blocks that are only
8383  * referenced by the tree.
8384  *
8385  * when a shared tree block is found, this function decreases its
8386  * reference count by one. if update_ref is true, this function
8387  * also makes sure backrefs for the shared block and all lower level
8388  * blocks are properly updated.
8389  *
8390  * If called with for_reloc == 0, may exit early with -EAGAIN
8391  */
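/*
 * A minimal usage sketch (an illustration only: it assumes the caller
 * already holds a reference on @root and needs no dedicated block
 * reservation, as when cleaning dead roots):
 *
 *	err = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (err && err != -EAGAIN)
 *		... the error was already reported via btrfs_std_error() ...
 */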
8392 int btrfs_drop_snapshot(struct btrfs_root *root,
8393                          struct btrfs_block_rsv *block_rsv, int update_ref,
8394                          int for_reloc)
8395 {
8396         struct btrfs_path *path;
8397         struct btrfs_trans_handle *trans;
8398         struct btrfs_root *tree_root = root->fs_info->tree_root;
8399         struct btrfs_root_item *root_item = &root->root_item;
8400         struct walk_control *wc;
8401         struct btrfs_key key;
8402         int err = 0;
8403         int ret;
8404         int level;
8405         bool root_dropped = false;
8406
8407         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8408
8409         path = btrfs_alloc_path();
8410         if (!path) {
8411                 err = -ENOMEM;
8412                 goto out;
8413         }
8414
8415         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8416         if (!wc) {
8417                 btrfs_free_path(path);
8418                 err = -ENOMEM;
8419                 goto out;
8420         }
8421
8422         trans = btrfs_start_transaction(tree_root, 0);
8423         if (IS_ERR(trans)) {
8424                 err = PTR_ERR(trans);
8425                 goto out_free;
8426         }
8427
8428         if (block_rsv)
8429                 trans->block_rsv = block_rsv;
8430
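        /*
         * A zero drop_progress key means this is a fresh drop and we start
         * from the root node; otherwise we resume a previously interrupted
         * drop at the key and level recorded in the root item.
         */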
8431         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8432                 level = btrfs_header_level(root->node);
8433                 path->nodes[level] = btrfs_lock_root_node(root);
8434                 btrfs_set_lock_blocking(path->nodes[level]);
8435                 path->slots[level] = 0;
8436                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8437                 memset(&wc->update_progress, 0,
8438                        sizeof(wc->update_progress));
8439         } else {
8440                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8441                 memcpy(&wc->update_progress, &key,
8442                        sizeof(wc->update_progress));
8443
8444                 level = root_item->drop_level;
8445                 BUG_ON(level == 0);
8446                 path->lowest_level = level;
8447                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8448                 path->lowest_level = 0;
8449                 if (ret < 0) {
8450                         err = ret;
8451                         goto out_end_trans;
8452                 }
8453                 WARN_ON(ret > 0);
8454
8455                 /*
8456                  * unlock our path, this is safe because only this
8457                  * function is allowed to delete this snapshot
8458                  */
8459                 btrfs_unlock_up_safe(path, 0);
8460
8461                 level = btrfs_header_level(root->node);
8462                 while (1) {
8463                         btrfs_tree_lock(path->nodes[level]);
8464                         btrfs_set_lock_blocking(path->nodes[level]);
8465                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8466
8467                         ret = btrfs_lookup_extent_info(trans, root,
8468                                                 path->nodes[level]->start,
8469                                                 level, 1, &wc->refs[level],
8470                                                 &wc->flags[level]);
8471                         if (ret < 0) {
8472                                 err = ret;
8473                                 goto out_end_trans;
8474                         }
8475                         BUG_ON(wc->refs[level] == 0);
8476
8477                         if (level == root_item->drop_level)
8478                                 break;
8479
8480                         btrfs_tree_unlock(path->nodes[level]);
8481                         path->locks[level] = 0;
8482                         WARN_ON(wc->refs[level] != 1);
8483                         level--;
8484                 }
8485         }
8486
8487         wc->level = level;
8488         wc->shared_level = -1;
8489         wc->stage = DROP_REFERENCE;
8490         wc->update_ref = update_ref;
8491         wc->keep_locks = 0;
8492         wc->for_reloc = for_reloc;
8493         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8494
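        /*
         * Main loop: walk down dropping references until we hit a shared
         * block or a leaf, then walk back up freeing blocks we own.  Every
         * so often we record our progress and end the transaction so the
         * drop can be throttled or restarted later.
         */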
8495         while (1) {
8496
8497                 ret = walk_down_tree(trans, root, path, wc);
8498                 if (ret < 0) {
8499                         err = ret;
8500                         break;
8501                 }
8502
8503                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8504                 if (ret < 0) {
8505                         err = ret;
8506                         break;
8507                 }
8508
8509                 if (ret > 0) {
8510                         BUG_ON(wc->stage != DROP_REFERENCE);
8511                         break;
8512                 }
8513
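                /*
                 * Record how far we got in the root item so an interrupted
                 * drop can resume from drop_progress/drop_level.
                 */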
8514                 if (wc->stage == DROP_REFERENCE) {
8515                         level = wc->level;
8516                         btrfs_node_key(path->nodes[level],
8517                                        &root_item->drop_progress,
8518                                        path->slots[level]);
8519                         root_item->drop_level = level;
8520                 }
8521
8522                 BUG_ON(wc->level == 0);
8523                 if (btrfs_should_end_transaction(trans, tree_root) ||
8524                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8525                         ret = btrfs_update_root(trans, tree_root,
8526                                                 &root->root_key,
8527                                                 root_item);
8528                         if (ret) {
8529                                 btrfs_abort_transaction(trans, tree_root, ret);
8530                                 err = ret;
8531                                 goto out_end_trans;
8532                         }
8533
8534                         btrfs_end_transaction_throttle(trans, tree_root);
8535                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8536                                 pr_debug("BTRFS: drop snapshot early exit\n");
8537                                 err = -EAGAIN;
8538                                 goto out_free;
8539                         }
8540
8541                         trans = btrfs_start_transaction(tree_root, 0);
8542                         if (IS_ERR(trans)) {
8543                                 err = PTR_ERR(trans);
8544                                 goto out_free;
8545                         }
8546                         if (block_rsv)
8547                                 trans->block_rsv = block_rsv;
8548                 }
8549         }
8550         btrfs_release_path(path);
8551         if (err)
8552                 goto out_end_trans;
8553
8554         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8555         if (ret) {
8556                 btrfs_abort_transaction(trans, tree_root, ret);
8557                 goto out_end_trans;
8558         }
8559
8560         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8561                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8562                                       NULL, NULL);
8563                 if (ret < 0) {
8564                         btrfs_abort_transaction(trans, tree_root, ret);
8565                         err = ret;
8566                         goto out_end_trans;
8567                 } else if (ret > 0) {
8568                         /* if we fail to delete the orphan item this time
8569                          * around, it'll get picked up the next time.
8570                          *
8571                          * The most common failure here is just -ENOENT.
8572                          */
8573                         btrfs_del_orphan_item(trans, tree_root,
8574                                               root->root_key.objectid);
8575                 }
8576         }
8577
8578         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8579                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8580         } else {
8581                 free_extent_buffer(root->node);
8582                 free_extent_buffer(root->commit_root);
8583                 btrfs_put_fs_root(root);
8584         }
8585         root_dropped = true;
8586 out_end_trans:
8587         btrfs_end_transaction_throttle(trans, tree_root);
8588 out_free:
8589         kfree(wc);
8590         btrfs_free_path(path);
8591 out:
8592         /*
8593          * So if we need to stop dropping the snapshot for whatever reason, we
8594          * need to make sure to add it back to the dead root list so that we
8595          * keep trying to do the work later.  This also cleans up the root if we
8596          * don't have it in the radix tree (like when we recover after a power
8597          * failure or unmount) so we don't leak memory.
8598          */
8599         if (!for_reloc && root_dropped == false)
8600                 btrfs_add_dead_root(root);
8601         if (err && err != -EAGAIN)
8602                 btrfs_std_error(root->fs_info, err);
8603         return err;
8604 }
8605
8606 /*
8607  * drop subtree rooted at tree block 'node'.
8608  *
8609  * NOTE: this function will unlock and release tree block 'node'.
8610  * only used by relocation code.
8611  */
8612 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8613                         struct btrfs_root *root,
8614                         struct extent_buffer *node,
8615                         struct extent_buffer *parent)
8616 {
8617         struct btrfs_path *path;
8618         struct walk_control *wc;
8619         int level;
8620         int parent_level;
8621         int ret = 0;
8622         int wret;
8623
8624         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8625
8626         path = btrfs_alloc_path();
8627         if (!path)
8628                 return -ENOMEM;
8629
8630         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8631         if (!wc) {
8632                 btrfs_free_path(path);
8633                 return -ENOMEM;
8634         }
8635
8636         btrfs_assert_tree_locked(parent);
8637         parent_level = btrfs_header_level(parent);
8638         extent_buffer_get(parent);
8639         path->nodes[parent_level] = parent;
8640         path->slots[parent_level] = btrfs_header_nritems(parent);
8641
8642         btrfs_assert_tree_locked(node);
8643         level = btrfs_header_level(node);
8644         path->nodes[level] = node;
8645         path->slots[level] = 0;
8646         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8647
8648         wc->refs[parent_level] = 1;
8649         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8650         wc->level = level;
8651         wc->shared_level = -1;
8652         wc->stage = DROP_REFERENCE;
8653         wc->update_ref = 0;
8654         wc->keep_locks = 1;
8655         wc->for_reloc = 1;
8656         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8657
8658         while (1) {
8659                 wret = walk_down_tree(trans, root, path, wc);
8660                 if (wret < 0) {
8661                         ret = wret;
8662                         break;
8663                 }
8664
8665                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8666                 if (wret < 0)
8667                         ret = wret;
8668                 if (wret != 0)
8669                         break;
8670         }
8671
8672         kfree(wc);
8673         btrfs_free_path(path);
8674         return ret;
8675 }
8676
8677 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8678 {
8679         u64 num_devices;
8680         u64 stripped;
8681
8682         /*
8683          * if restripe for this chunk_type is on, pick the target profile
8684          * and return; otherwise do the usual balance
8685          */
8686         stripped = get_restripe_target(root->fs_info, flags);
8687         if (stripped)
8688                 return extended_to_chunk(stripped);
8689
8690         num_devices = root->fs_info->fs_devices->rw_devices;
8691
8692         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8693                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8694                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8695
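        /*
         * e.g. with a single rw device left, RAID1/RAID10 turn into DUP
         * and RAID0 turns into single-device chunks; with more devices,
         * DUP is swapped back to RAID1.
         */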
8696         if (num_devices == 1) {
8697                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8698                 stripped = flags & ~stripped;
8699
8700                 /* turn raid0 into single device chunks */
8701                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8702                         return stripped;
8703
8704                 /* turn mirroring into duplication */
8705                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8706                              BTRFS_BLOCK_GROUP_RAID10))
8707                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8708         } else {
8709                 /* they already had raid on here, just return */
8710                 if (flags & stripped)
8711                         return flags;
8712
8713                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8714                 stripped = flags & ~stripped;
8715
8716                 /* switch duplicated blocks with raid1 */
8717                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8718                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8719
8720                 /* this is drive concat, leave it alone */
8721         }
8722
8723         return flags;
8724 }
8725
8726 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8727 {
8728         struct btrfs_space_info *sinfo = cache->space_info;
8729         u64 num_bytes;
8730         u64 min_allocable_bytes;
8731         int ret = -ENOSPC;
8732
8733
8734         /*
8735          * We need some metadata space and system metadata space for
8736          * allocating chunks in some corner cases, unless we're forced
8737          * to set the block group read-only.
8738          */
8739         if ((sinfo->flags &
8740              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8741             !force)
8742                 min_allocable_bytes = 1 * 1024 * 1024;
8743         else
8744                 min_allocable_bytes = 0;
8745
8746         spin_lock(&sinfo->lock);
8747         spin_lock(&cache->lock);
8748
8749         if (cache->ro) {
8750                 ret = 0;
8751                 goto out;
8752         }
8753
8754         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8755                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8756
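        /*
         * num_bytes is the currently unused space in this block group;
         * making the group read-only moves it into bytes_readonly, so only
         * do that if the space_info keeps min_allocable_bytes of headroom.
         */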
8757         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8758             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8759             min_allocable_bytes <= sinfo->total_bytes) {
8760                 sinfo->bytes_readonly += num_bytes;
8761                 cache->ro = 1;
8762                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8763                 ret = 0;
8764         }
8765 out:
8766         spin_unlock(&cache->lock);
8767         spin_unlock(&sinfo->lock);
8768         return ret;
8769 }
8770
8771 int btrfs_set_block_group_ro(struct btrfs_root *root,
8772                              struct btrfs_block_group_cache *cache)
8773
8774 {
8775         struct btrfs_trans_handle *trans;
8776         u64 alloc_flags;
8777         int ret;
8778
8779         BUG_ON(cache->ro);
8780
8781 again:
8782         trans = btrfs_join_transaction(root);
8783         if (IS_ERR(trans))
8784                 return PTR_ERR(trans);
8785
8786         /*
8787          * we're not allowed to set block groups readonly after the dirty
8788          * block groups cache has started writing.  If it already started,
8789          * back off and let this transaction commit
8790          */
8791         mutex_lock(&root->fs_info->ro_block_group_mutex);
8792         if (trans->transaction->dirty_bg_run) {
8793                 u64 transid = trans->transid;
8794
8795                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8796                 btrfs_end_transaction(trans, root);
8797
8798                 ret = btrfs_wait_for_commit(root, transid);
8799                 if (ret)
8800                         return ret;
8801                 goto again;
8802         }
8803
8804         /*
8805          * if we are changing raid levels, try to allocate a corresponding
8806          * block group with the new raid level.
8807          */
8808         alloc_flags = update_block_group_flags(root, cache->flags);
8809         if (alloc_flags != cache->flags) {
8810                 ret = do_chunk_alloc(trans, root, alloc_flags,
8811                                      CHUNK_ALLOC_FORCE);
8812                 /*
8813                  * ENOSPC is allowed here, we may have enough space
8814                  * already allocated at the new raid level to
8815                  * carry on
8816                  */
8817                 if (ret == -ENOSPC)
8818                         ret = 0;
8819                 if (ret < 0)
8820                         goto out;
8821         }
8822
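        /*
         * Try to mark the group read-only; on ENOSPC, force-allocate one
         * more chunk for this profile and retry once below.
         */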
8823         ret = set_block_group_ro(cache, 0);
8824         if (!ret)
8825                 goto out;
8826         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8827         ret = do_chunk_alloc(trans, root, alloc_flags,
8828                              CHUNK_ALLOC_FORCE);
8829         if (ret < 0)
8830                 goto out;
8831         ret = set_block_group_ro(cache, 0);
8832 out:
8833         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8834                 alloc_flags = update_block_group_flags(root, cache->flags);
8835                 lock_chunks(root->fs_info->chunk_root);
8836                 check_system_chunk(trans, root, alloc_flags);
8837                 unlock_chunks(root->fs_info->chunk_root);
8838         }
8839         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8840
8841         btrfs_end_transaction(trans, root);
8842         return ret;
8843 }
8844
8845 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8846                             struct btrfs_root *root, u64 type)
8847 {
8848         u64 alloc_flags = get_alloc_profile(root, type);
8849         return do_chunk_alloc(trans, root, alloc_flags,
8850                               CHUNK_ALLOC_FORCE);
8851 }
8852
8853 /*
8854  * helper to account the unused space of all the read-only block groups in the
8855  * space_info. takes mirrors into account.
8856  */
8857 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8858 {
8859         struct btrfs_block_group_cache *block_group;
8860         u64 free_bytes = 0;
8861         int factor;
8862
8863         /* It's df, we don't care if it's racy */
8864         if (list_empty(&sinfo->ro_bgs))
8865                 return 0;
8866
8867         spin_lock(&sinfo->lock);
8868         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8869                 spin_lock(&block_group->lock);
8870
8871                 if (!block_group->ro) {
8872                         spin_unlock(&block_group->lock);
8873                         continue;
8874                 }
8875
8876                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8877                                           BTRFS_BLOCK_GROUP_RAID10 |
8878                                           BTRFS_BLOCK_GROUP_DUP))
8879                         factor = 2;
8880                 else
8881                         factor = 1;
8882
8883                 free_bytes += (block_group->key.offset -
8884                                btrfs_block_group_used(&block_group->item)) *
8885                                factor;
8886
8887                 spin_unlock(&block_group->lock);
8888         }
8889         spin_unlock(&sinfo->lock);
8890
8891         return free_bytes;
8892 }
8893
8894 void btrfs_set_block_group_rw(struct btrfs_root *root,
8895                               struct btrfs_block_group_cache *cache)
8896 {
8897         struct btrfs_space_info *sinfo = cache->space_info;
8898         u64 num_bytes;
8899
8900         BUG_ON(!cache->ro);
8901
8902         spin_lock(&sinfo->lock);
8903         spin_lock(&cache->lock);
8904         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8905                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8906         sinfo->bytes_readonly -= num_bytes;
8907         cache->ro = 0;
8908         list_del_init(&cache->ro_list);
8909         spin_unlock(&cache->lock);
8910         spin_unlock(&sinfo->lock);
8911 }
8912
8913 /*
8914  * checks to see if it's even possible to relocate this block group.
8915  *
8916  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8917  * ok to go ahead and try.
8918  */
8919 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8920 {
8921         struct btrfs_block_group_cache *block_group;
8922         struct btrfs_space_info *space_info;
8923         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8924         struct btrfs_device *device;
8925         struct btrfs_trans_handle *trans;
8926         u64 min_free;
8927         u64 dev_min = 1;
8928         u64 dev_nr = 0;
8929         u64 target;
8930         int index;
8931         int full = 0;
8932         int ret = 0;
8933
8934         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8935
8936         /* odd, couldn't find the block group, leave it alone */
8937         if (!block_group)
8938                 return -1;
8939
8940         min_free = btrfs_block_group_used(&block_group->item);
8941
8942         /* no bytes used, we're good */
8943         if (!min_free)
8944                 goto out;
8945
8946         space_info = block_group->space_info;
8947         spin_lock(&space_info->lock);
8948
8949         full = space_info->full;
8950
8951         /*
8952          * if this is the last block group we have in this space, we can't
8953          * relocate it unless we're able to allocate a new chunk below.
8954          *
8955          * Otherwise, we need to make sure we have room in the space to handle
8956          * all of the extents from this block group.  If we can, we're good
8957          */
8958         if ((space_info->total_bytes != block_group->key.offset) &&
8959             (space_info->bytes_used + space_info->bytes_reserved +
8960              space_info->bytes_pinned + space_info->bytes_readonly +
8961              min_free < space_info->total_bytes)) {
8962                 spin_unlock(&space_info->lock);
8963                 goto out;
8964         }
8965         spin_unlock(&space_info->lock);
8966
8967         /*
8968          * ok we don't have enough space, but maybe we have free space on our
8969          * devices to allocate new chunks for relocation, so loop through our
8970          * alloc devices and guess if we have enough space.  if this block
8971          * group is going to be restriped, run checks against the target
8972          * profile instead of the current one.
8973          */
8974         ret = -1;
8975
8976         /*
8977          * index:
8978          *      0: raid10
8979          *      1: raid1
8980          *      2: dup
8981          *      3: raid0
8982          *      4: single
8983          */
8984         target = get_restripe_target(root->fs_info, block_group->flags);
8985         if (target) {
8986                 index = __get_raid_index(extended_to_chunk(target));
8987         } else {
8988                 /*
8989                  * this is just a balance, so if we were marked as full
8990                  * we know there is no space for a new chunk
8991                  */
8992                 if (full)
8993                         goto out;
8994
8995                 index = get_block_group_index(block_group);
8996         }
8997
8998         if (index == BTRFS_RAID_RAID10) {
8999                 dev_min = 4;
9000                 /* Divide by 2 */
9001                 min_free >>= 1;
9002         } else if (index == BTRFS_RAID_RAID1) {
9003                 dev_min = 2;
9004         } else if (index == BTRFS_RAID_DUP) {
9005                 /* Multiply by 2 */
9006                 min_free <<= 1;
9007         } else if (index == BTRFS_RAID_RAID0) {
9008                 dev_min = fs_devices->rw_devices;
9009                 min_free = div64_u64(min_free, dev_min);
9010         }
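        /*
         * e.g. RAID1 needs two devices that can each hold a full copy,
         * while RAID10 spreads the data over four devices so each one
         * only needs half of min_free.
         */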
9011
9012         /* We need to do this so that we can look at pending chunks */
9013         trans = btrfs_join_transaction(root);
9014         if (IS_ERR(trans)) {
9015                 ret = PTR_ERR(trans);
9016                 goto out;
9017         }
9018
9019         mutex_lock(&root->fs_info->chunk_mutex);
9020         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9021                 u64 dev_offset;
9022
9023                 /*
9024                  * check to make sure we can actually find a chunk with enough
9025                  * space to fit our block group in.
9026                  */
9027                 if (device->total_bytes > device->bytes_used + min_free &&
9028                     !device->is_tgtdev_for_dev_replace) {
9029                         ret = find_free_dev_extent(trans, device, min_free,
9030                                                    &dev_offset, NULL);
9031                         if (!ret)
9032                                 dev_nr++;
9033
9034                         if (dev_nr >= dev_min)
9035                                 break;
9036
9037                         ret = -1;
9038                 }
9039         }
9040         mutex_unlock(&root->fs_info->chunk_mutex);
9041         btrfs_end_transaction(trans, root);
9042 out:
9043         btrfs_put_block_group(block_group);
9044         return ret;
9045 }
9046
9047 static int find_first_block_group(struct btrfs_root *root,
9048                 struct btrfs_path *path, struct btrfs_key *key)
9049 {
9050         int ret = 0;
9051         struct btrfs_key found_key;
9052         struct extent_buffer *leaf;
9053         int slot;
9054
9055         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9056         if (ret < 0)
9057                 goto out;
9058
9059         while (1) {
9060                 slot = path->slots[0];
9061                 leaf = path->nodes[0];
9062                 if (slot >= btrfs_header_nritems(leaf)) {
9063                         ret = btrfs_next_leaf(root, path);
9064                         if (ret == 0)
9065                                 continue;
9066                         if (ret < 0)
9067                                 goto out;
9068                         break;
9069                 }
9070                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9071
9072                 if (found_key.objectid >= key->objectid &&
9073                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9074                         ret = 0;
9075                         goto out;
9076                 }
9077                 path->slots[0]++;
9078         }
9079 out:
9080         return ret;
9081 }
9082
9083 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9084 {
9085         struct btrfs_block_group_cache *block_group;
9086         u64 last = 0;
9087
9088         while (1) {
9089                 struct inode *inode;
9090
9091                 block_group = btrfs_lookup_first_block_group(info, last);
9092                 while (block_group) {
9093                         spin_lock(&block_group->lock);
9094                         if (block_group->iref)
9095                                 break;
9096                         spin_unlock(&block_group->lock);
9097                         block_group = next_block_group(info->tree_root,
9098                                                        block_group);
9099                 }
9100                 if (!block_group) {
9101                         if (last == 0)
9102                                 break;
9103                         last = 0;
9104                         continue;
9105                 }
9106
9107                 inode = block_group->inode;
9108                 block_group->iref = 0;
9109                 block_group->inode = NULL;
9110                 spin_unlock(&block_group->lock);
9111                 iput(inode);
9112                 last = block_group->key.objectid + block_group->key.offset;
9113                 btrfs_put_block_group(block_group);
9114         }
9115 }
9116
9117 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9118 {
9119         struct btrfs_block_group_cache *block_group;
9120         struct btrfs_space_info *space_info;
9121         struct btrfs_caching_control *caching_ctl;
9122         struct rb_node *n;
9123
9124         down_write(&info->commit_root_sem);
9125         while (!list_empty(&info->caching_block_groups)) {
9126                 caching_ctl = list_entry(info->caching_block_groups.next,
9127                                          struct btrfs_caching_control, list);
9128                 list_del(&caching_ctl->list);
9129                 put_caching_control(caching_ctl);
9130         }
9131         up_write(&info->commit_root_sem);
9132
9133         spin_lock(&info->unused_bgs_lock);
9134         while (!list_empty(&info->unused_bgs)) {
9135                 block_group = list_first_entry(&info->unused_bgs,
9136                                                struct btrfs_block_group_cache,
9137                                                bg_list);
9138                 list_del_init(&block_group->bg_list);
9139                 btrfs_put_block_group(block_group);
9140         }
9141         spin_unlock(&info->unused_bgs_lock);
9142
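        /*
         * Tear down every cached block group: unlink it from the rbtree
         * and from its space_info, wait for in-flight caching to finish,
         * then drop the free space cache and our last reference.
         */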
9143         spin_lock(&info->block_group_cache_lock);
9144         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9145                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9146                                        cache_node);
9147                 rb_erase(&block_group->cache_node,
9148                          &info->block_group_cache_tree);
9149                 RB_CLEAR_NODE(&block_group->cache_node);
9150                 spin_unlock(&info->block_group_cache_lock);
9151
9152                 down_write(&block_group->space_info->groups_sem);
9153                 list_del(&block_group->list);
9154                 up_write(&block_group->space_info->groups_sem);
9155
9156                 if (block_group->cached == BTRFS_CACHE_STARTED)
9157                         wait_block_group_cache_done(block_group);
9158
9159                 /*
9160                  * We haven't cached this block group, which means we could
9161                  * possibly have excluded extents on this block group.
9162                  */
9163                 if (block_group->cached == BTRFS_CACHE_NO ||
9164                     block_group->cached == BTRFS_CACHE_ERROR)
9165                         free_excluded_extents(info->extent_root, block_group);
9166
9167                 btrfs_remove_free_space_cache(block_group);
9168                 btrfs_put_block_group(block_group);
9169
9170                 spin_lock(&info->block_group_cache_lock);
9171         }
9172         spin_unlock(&info->block_group_cache_lock);
9173
9174         /* now that all the block groups are freed, go through and
9175          * free all the space_info structs.  This is only called during
9176          * the final stages of unmount, and so we know nobody is
9177          * using them.  We call synchronize_rcu() once before we start,
9178          * just to be on the safe side.
9179          */
9180         synchronize_rcu();
9181
9182         release_global_block_rsv(info);
9183
9184         while (!list_empty(&info->space_info)) {
9185                 int i;
9186
9187                 space_info = list_entry(info->space_info.next,
9188                                         struct btrfs_space_info,
9189                                         list);
9190                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9191                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9192                             space_info->bytes_reserved > 0 ||
9193                             space_info->bytes_may_use > 0)) {
9194                                 dump_space_info(space_info, 0, 0);
9195                         }
9196                 }
9197                 list_del(&space_info->list);
9198                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9199                         struct kobject *kobj;
9200                         kobj = space_info->block_group_kobjs[i];
9201                         space_info->block_group_kobjs[i] = NULL;
9202                         if (kobj) {
9203                                 kobject_del(kobj);
9204                                 kobject_put(kobj);
9205                         }
9206                 }
9207                 kobject_del(&space_info->kobj);
9208                 kobject_put(&space_info->kobj);
9209         }
9210         return 0;
9211 }
9212
9213 static void __link_block_group(struct btrfs_space_info *space_info,
9214                                struct btrfs_block_group_cache *cache)
9215 {
9216         int index = get_block_group_index(cache);
9217         bool first = false;
9218
9219         down_write(&space_info->groups_sem);
9220         if (list_empty(&space_info->block_groups[index]))
9221                 first = true;
9222         list_add_tail(&cache->list, &space_info->block_groups[index]);
9223         up_write(&space_info->groups_sem);
9224
9225         if (first) {
9226                 struct raid_kobject *rkobj;
9227                 int ret;
9228
9229                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9230                 if (!rkobj)
9231                         goto out_err;
9232                 rkobj->raid_type = index;
9233                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9234                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9235                                   "%s", get_raid_name(index));
9236                 if (ret) {
9237                         kobject_put(&rkobj->kobj);
9238                         goto out_err;
9239                 }
9240                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9241         }
9242
9243         return;
9244 out_err:
9245         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9246 }
9247
9248 static struct btrfs_block_group_cache *
9249 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9250 {
9251         struct btrfs_block_group_cache *cache;
9252
9253         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9254         if (!cache)
9255                 return NULL;
9256
9257         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9258                                         GFP_NOFS);
9259         if (!cache->free_space_ctl) {
9260                 kfree(cache);
9261                 return NULL;
9262         }
9263
9264         cache->key.objectid = start;
9265         cache->key.offset = size;
9266         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9267
9268         cache->sectorsize = root->sectorsize;
9269         cache->fs_info = root->fs_info;
9270         cache->full_stripe_len = btrfs_full_stripe_len(root,
9271                                                &root->fs_info->mapping_tree,
9272                                                start);
9273         atomic_set(&cache->count, 1);
9274         spin_lock_init(&cache->lock);
9275         init_rwsem(&cache->data_rwsem);
9276         INIT_LIST_HEAD(&cache->list);
9277         INIT_LIST_HEAD(&cache->cluster_list);
9278         INIT_LIST_HEAD(&cache->bg_list);
9279         INIT_LIST_HEAD(&cache->ro_list);
9280         INIT_LIST_HEAD(&cache->dirty_list);
9281         INIT_LIST_HEAD(&cache->io_list);
9282         btrfs_init_free_space_ctl(cache);
9283         atomic_set(&cache->trimming, 0);
9284
9285         return cache;
9286 }
9287
9288 int btrfs_read_block_groups(struct btrfs_root *root)
9289 {
9290         struct btrfs_path *path;
9291         int ret;
9292         struct btrfs_block_group_cache *cache;
9293         struct btrfs_fs_info *info = root->fs_info;
9294         struct btrfs_space_info *space_info;
9295         struct btrfs_key key;
9296         struct btrfs_key found_key;
9297         struct extent_buffer *leaf;
9298         int need_clear = 0;
9299         u64 cache_gen;
9300
9301         root = info->extent_root;
9302         key.objectid = 0;
9303         key.offset = 0;
9304         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9305         path = btrfs_alloc_path();
9306         if (!path)
9307                 return -ENOMEM;
9308         path->reada = 1;
9309
9310         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9311         if (btrfs_test_opt(root, SPACE_CACHE) &&
9312             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9313                 need_clear = 1;
9314         if (btrfs_test_opt(root, CLEAR_CACHE))
9315                 need_clear = 1;
9316
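        /*
         * Scan the extent tree for BLOCK_GROUP_ITEM keys and build an
         * in-memory block group cache entry for each one found.
         */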
9317         while (1) {
9318                 ret = find_first_block_group(root, path, &key);
9319                 if (ret > 0)
9320                         break;
9321                 if (ret != 0)
9322                         goto error;
9323
9324                 leaf = path->nodes[0];
9325                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9326
9327                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9328                                                        found_key.offset);
9329                 if (!cache) {
9330                         ret = -ENOMEM;
9331                         goto error;
9332                 }
9333
9334                 if (need_clear) {
9335                         /*
9336                          * When we mount with an old space cache, we need to
9337                          * set BTRFS_DC_CLEAR and set the dirty flag.
9338                          *
9339                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9340                          *    truncate the old free space cache inode and
9341                          *    set up a new one.
9342                          * b) Setting the 'dirty flag' makes sure that we flush
9343                          *    the new space cache info onto disk.
9344                          */
9345                         if (btrfs_test_opt(root, SPACE_CACHE))
9346                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9347                 }
9348
9349                 read_extent_buffer(leaf, &cache->item,
9350                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9351                                    sizeof(cache->item));
9352                 cache->flags = btrfs_block_group_flags(&cache->item);
9353
9354                 key.objectid = found_key.objectid + found_key.offset;
9355                 btrfs_release_path(path);
9356
9357                 /*
9358                  * We need to exclude the super stripes now so that the space
9359                  * info has super bytes accounted for, otherwise we'll think
9360                  * we have more space than we actually do.
9361                  */
9362                 ret = exclude_super_stripes(root, cache);
9363                 if (ret) {
9364                         /*
9365                          * We may have excluded something, so call this just in
9366                          * case.
9367                          */
9368                         free_excluded_extents(root, cache);
9369                         btrfs_put_block_group(cache);
9370                         goto error;
9371                 }
9372
9373                 /*
9374                  * Check for two cases: either we are full, and therefore
9375                  * don't need to bother with the caching work since we won't
9376                  * find any space, or we are empty, and we can just add all
9377                  * the space in and be done with it.  This saves us a lot of
9378                  * time, particularly in the full case.
9379                  */
9380                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9381                         cache->last_byte_to_unpin = (u64)-1;
9382                         cache->cached = BTRFS_CACHE_FINISHED;
9383                         free_excluded_extents(root, cache);
9384                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9385                         cache->last_byte_to_unpin = (u64)-1;
9386                         cache->cached = BTRFS_CACHE_FINISHED;
9387                         add_new_free_space(cache, root->fs_info,
9388                                            found_key.objectid,
9389                                            found_key.objectid +
9390                                            found_key.offset);
9391                         free_excluded_extents(root, cache);
9392                 }
9393
9394                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9395                 if (ret) {
9396                         btrfs_remove_free_space_cache(cache);
9397                         btrfs_put_block_group(cache);
9398                         goto error;
9399                 }
9400
9401                 ret = update_space_info(info, cache->flags, found_key.offset,
9402                                         btrfs_block_group_used(&cache->item),
9403                                         &space_info);
9404                 if (ret) {
9405                         btrfs_remove_free_space_cache(cache);
9406                         spin_lock(&info->block_group_cache_lock);
9407                         rb_erase(&cache->cache_node,
9408                                  &info->block_group_cache_tree);
9409                         RB_CLEAR_NODE(&cache->cache_node);
9410                         spin_unlock(&info->block_group_cache_lock);
9411                         btrfs_put_block_group(cache);
9412                         goto error;
9413                 }
9414
9415                 cache->space_info = space_info;
9416                 spin_lock(&cache->space_info->lock);
9417                 cache->space_info->bytes_readonly += cache->bytes_super;
9418                 spin_unlock(&cache->space_info->lock);
9419
9420                 __link_block_group(space_info, cache);
9421
9422                 set_avail_alloc_bits(root->fs_info, cache->flags);
9423                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9424                         set_block_group_ro(cache, 1);
9425                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9426                         spin_lock(&info->unused_bgs_lock);
9427                         /* Should always be true but just in case. */
9428                         if (list_empty(&cache->bg_list)) {
9429                                 btrfs_get_block_group(cache);
9430                                 list_add_tail(&cache->bg_list,
9431                                               &info->unused_bgs);
9432                         }
9433                         spin_unlock(&info->unused_bgs_lock);
9434                 }
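                /*
                 * Empty block groups queued on fs_info->unused_bgs above
                 * are reclaimed later by btrfs_delete_unused_bgs() below.
                 */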
9435         }
9436
9437         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9438                 if (!(get_alloc_profile(root, space_info->flags) &
9439                       (BTRFS_BLOCK_GROUP_RAID10 |
9440                        BTRFS_BLOCK_GROUP_RAID1 |
9441                        BTRFS_BLOCK_GROUP_RAID5 |
9442                        BTRFS_BLOCK_GROUP_RAID6 |
9443                        BTRFS_BLOCK_GROUP_DUP)))
9444                         continue;
9445                 /*
9446                  * Avoid allocating from un-mirrored block groups if there are
9447                  * mirrored block groups.
9448                  */
9449                 list_for_each_entry(cache,
9450                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9451                                 list)
9452                         set_block_group_ro(cache, 1);
9453                 list_for_each_entry(cache,
9454                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9455                                 list)
9456                         set_block_group_ro(cache, 1);
9457         }
9458
9459         init_global_block_rsv(info);
9460         ret = 0;
9461 error:
9462         btrfs_free_path(path);
9463         return ret;
9464 }
9465
9466 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9467                                        struct btrfs_root *root)
9468 {
9469         struct btrfs_block_group_cache *block_group, *tmp;
9470         struct btrfs_root *extent_root = root->fs_info->extent_root;
9471         struct btrfs_block_group_item item;
9472         struct btrfs_key key;
9473         int ret = 0;
9474
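        /*
         * Once an insertion fails, 'ret' stays set and every following
         * iteration only drains the list: the transaction has already
         * been aborted, so there is no point in inserting more items.
         */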
9475         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9476                 if (ret)
9477                         goto next;
9478
9479                 spin_lock(&block_group->lock);
9480                 memcpy(&item, &block_group->item, sizeof(item));
9481                 memcpy(&key, &block_group->key, sizeof(key));
9482                 spin_unlock(&block_group->lock);
9483
9484                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9485                                         sizeof(item));
9486                 if (ret)
9487                         btrfs_abort_transaction(trans, extent_root, ret);
9488                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9489                                                key.objectid, key.offset);
9490                 if (ret)
9491                         btrfs_abort_transaction(trans, extent_root, ret);
9492 next:
9493                 list_del_init(&block_group->bg_list);
9494         }
9495 }
9496
9497 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9498                            struct btrfs_root *root, u64 bytes_used,
9499                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9500                            u64 size)
9501 {
9502         int ret;
9503         struct btrfs_root *extent_root;
9504         struct btrfs_block_group_cache *cache;
9505
9506         extent_root = root->fs_info->extent_root;
9507
9508         btrfs_set_log_full_commit(root->fs_info, trans);
9509
9510         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9511         if (!cache)
9512                 return -ENOMEM;
9513
9514         btrfs_set_block_group_used(&cache->item, bytes_used);
9515         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9516         btrfs_set_block_group_flags(&cache->item, type);
9517
9518         cache->flags = type;
9519         cache->last_byte_to_unpin = (u64)-1;
9520         cache->cached = BTRFS_CACHE_FINISHED;
9521         ret = exclude_super_stripes(root, cache);
9522         if (ret) {
9523                 /*
9524                  * We may have excluded something, so call this just in
9525                  * case.
9526                  */
9527                 free_excluded_extents(root, cache);
9528                 btrfs_put_block_group(cache);
9529                 return ret;
9530         }
9531
9532         add_new_free_space(cache, root->fs_info, chunk_offset,
9533                            chunk_offset + size);
9534
9535         free_excluded_extents(root, cache);
9536
9537         /*
9538          * Call to ensure the corresponding space_info object is created and
9539          * assigned to our block group, but don't update its counters just yet.
9540          * We want our bg to be added to the rbtree with its ->space_info set.
9541          */
9542         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9543                                 &cache->space_info);
9544         if (ret) {
9545                 btrfs_remove_free_space_cache(cache);
9546                 btrfs_put_block_group(cache);
9547                 return ret;
9548         }
9549
9550         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9551         if (ret) {
9552                 btrfs_remove_free_space_cache(cache);
9553                 btrfs_put_block_group(cache);
9554                 return ret;
9555         }
9556
9557         /*
9558          * Now that our block group has its ->space_info set and is inserted in
9559          * the rbtree, update the space info's counters.
9560          */
9561         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9562                                 &cache->space_info);
9563         if (ret) {
9564                 btrfs_remove_free_space_cache(cache);
9565                 spin_lock(&root->fs_info->block_group_cache_lock);
9566                 rb_erase(&cache->cache_node,
9567                          &root->fs_info->block_group_cache_tree);
9568                 RB_CLEAR_NODE(&cache->cache_node);
9569                 spin_unlock(&root->fs_info->block_group_cache_lock);
9570                 btrfs_put_block_group(cache);
9571                 return ret;
9572         }
9573         update_global_block_rsv(root->fs_info);
9574
9575         spin_lock(&cache->space_info->lock);
9576         cache->space_info->bytes_readonly += cache->bytes_super;
9577         spin_unlock(&cache->space_info->lock);
9578
9579         __link_block_group(cache->space_info, cache);
9580
9581         list_add_tail(&cache->bg_list, &trans->new_bgs);
9582
9583         set_avail_alloc_bits(extent_root->fs_info, type);
9584
9585         return 0;
9586 }
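
/*
 * Note that btrfs_make_block_group() only queues the new block group on
 * trans->new_bgs; the on-disk items are created later in the transaction.
 * A rough sketch of the flow (the chunk allocation side lives in
 * volumes.c):
 *
 *        btrfs_alloc_chunk()
 *          -> btrfs_make_block_group()         queues bg on trans->new_bgs
 *        ...
 *        btrfs_create_pending_block_groups()   inserts the block group item
 *          -> btrfs_finish_chunk_alloc()       inserts the chunk mapping items
 */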
9587
9588 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9589 {
9590         u64 extra_flags = chunk_to_extended(flags) &
9591                                 BTRFS_EXTENDED_PROFILE_MASK;
9592
9593         write_seqlock(&fs_info->profiles_lock);
9594         if (flags & BTRFS_BLOCK_GROUP_DATA)
9595                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9596         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9597                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9598         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9599                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9600         write_sequnlock(&fs_info->profiles_lock);
9601 }
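
/*
 * fs_info->profiles_lock is a seqlock: writers, like the function above,
 * take write_seqlock(), while readers retry lock-free.  A minimal reader
 * sketch of the pattern used elsewhere in this file:
 *
 *        unsigned int seq;
 *        u64 avail;
 *
 *        do {
 *                seq = read_seqbegin(&fs_info->profiles_lock);
 *                avail = fs_info->avail_data_alloc_bits;
 *        } while (read_seqretry(&fs_info->profiles_lock, seq));
 */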
9602
9603 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9604                              struct btrfs_root *root, u64 group_start,
9605                              struct extent_map *em)
9606 {
9607         struct btrfs_path *path;
9608         struct btrfs_block_group_cache *block_group;
9609         struct btrfs_free_cluster *cluster;
9610         struct btrfs_root *tree_root = root->fs_info->tree_root;
9611         struct btrfs_key key;
9612         struct inode *inode;
9613         struct kobject *kobj = NULL;
9614         int ret;
9615         int index;
9616         int factor;
9617         struct btrfs_caching_control *caching_ctl = NULL;
9618         bool remove_em;
9619
9620         root = root->fs_info->extent_root;
9621
9622         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9623         BUG_ON(!block_group);
9624         BUG_ON(!block_group->ro);
9625
9626         /*
9627          * Free the reserved super bytes from this block group before
9628          * removing it.
9629          */
9630         free_excluded_extents(root, block_group);
9631
9632         memcpy(&key, &block_group->key, sizeof(key));
9633         index = get_block_group_index(block_group);
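        /*
         * DUP, RAID1 and RAID10 keep two copies of every byte, so they
         * consume twice the raw device space; 'factor' scales the
         * space_info->disk_total accounting below accordingly.
         */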
9634         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9635                                   BTRFS_BLOCK_GROUP_RAID1 |
9636                                   BTRFS_BLOCK_GROUP_RAID10))
9637                 factor = 2;
9638         else
9639                 factor = 1;
9640
9641         /* make sure this block group isn't part of an allocation cluster */
9642         cluster = &root->fs_info->data_alloc_cluster;
9643         spin_lock(&cluster->refill_lock);
9644         btrfs_return_cluster_to_free_space(block_group, cluster);
9645         spin_unlock(&cluster->refill_lock);
9646
9647         /*
9648          * make sure this block group isn't part of a metadata
9649          * allocation cluster
9650          */
9651         cluster = &root->fs_info->meta_alloc_cluster;
9652         spin_lock(&cluster->refill_lock);
9653         btrfs_return_cluster_to_free_space(block_group, cluster);
9654         spin_unlock(&cluster->refill_lock);
9655
9656         path = btrfs_alloc_path();
9657         if (!path) {
9658                 ret = -ENOMEM;
9659                 goto out;
9660         }
9661
9662         /*
9663          * get the inode first so any iput calls done for the io_list
9664          * aren't the final iput (no unlinks allowed now)
9665          */
9666         inode = lookup_free_space_inode(tree_root, block_group, path);
9667
9668         mutex_lock(&trans->transaction->cache_write_mutex);
9669         /*
9670          * Make sure our free space cache IO is done before removing the
9671          * free space inode.
9672          */
9673         spin_lock(&trans->transaction->dirty_bgs_lock);
9674         if (!list_empty(&block_group->io_list)) {
9675                 list_del_init(&block_group->io_list);
9676
9677                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9678
9679                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9680                 btrfs_wait_cache_io(root, trans, block_group,
9681                                     &block_group->io_ctl, path,
9682                                     block_group->key.objectid);
9683                 btrfs_put_block_group(block_group);
9684                 spin_lock(&trans->transaction->dirty_bgs_lock);
9685         }
9686
9687         if (!list_empty(&block_group->dirty_list)) {
9688                 list_del_init(&block_group->dirty_list);
9689                 btrfs_put_block_group(block_group);
9690         }
9691         spin_unlock(&trans->transaction->dirty_bgs_lock);
9692         mutex_unlock(&trans->transaction->cache_write_mutex);
9693
9694         if (!IS_ERR(inode)) {
9695                 ret = btrfs_orphan_add(trans, inode);
9696                 if (ret) {
9697                         btrfs_add_delayed_iput(inode);
9698                         goto out;
9699                 }
9700                 clear_nlink(inode);
9701                 /* One for the block group's ref */
9702                 spin_lock(&block_group->lock);
9703                 if (block_group->iref) {
9704                         block_group->iref = 0;
9705                         block_group->inode = NULL;
9706                         spin_unlock(&block_group->lock);
9707                         iput(inode);
9708                 } else {
9709                         spin_unlock(&block_group->lock);
9710                 }
9711                 /* One for our lookup ref */
9712                 btrfs_add_delayed_iput(inode);
9713         }
9714
9715         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9716         key.offset = block_group->key.objectid;
9717         key.type = 0;
9718
9719         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9720         if (ret < 0)
9721                 goto out;
9722         if (ret > 0)
9723                 btrfs_release_path(path);
9724         if (ret == 0) {
9725                 ret = btrfs_del_item(trans, tree_root, path);
9726                 if (ret)
9727                         goto out;
9728                 btrfs_release_path(path);
9729         }
9730
9731         spin_lock(&root->fs_info->block_group_cache_lock);
9732         rb_erase(&block_group->cache_node,
9733                  &root->fs_info->block_group_cache_tree);
9734         RB_CLEAR_NODE(&block_group->cache_node);
9735
9736         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9737                 root->fs_info->first_logical_byte = (u64)-1;
9738         spin_unlock(&root->fs_info->block_group_cache_lock);
9739
9740         down_write(&block_group->space_info->groups_sem);
9741         /*
9742          * we must use list_del_init so people can check to see if they
9743          * are still on the list after taking the semaphore
9744          */
9745         list_del_init(&block_group->list);
9746         if (list_empty(&block_group->space_info->block_groups[index])) {
9747                 kobj = block_group->space_info->block_group_kobjs[index];
9748                 block_group->space_info->block_group_kobjs[index] = NULL;
9749                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9750         }
9751         up_write(&block_group->space_info->groups_sem);
9752         if (kobj) {
9753                 kobject_del(kobj);
9754                 kobject_put(kobj);
9755         }
9756
9757         if (block_group->has_caching_ctl)
9758                 caching_ctl = get_caching_control(block_group);
9759         if (block_group->cached == BTRFS_CACHE_STARTED)
9760                 wait_block_group_cache_done(block_group);
9761         if (block_group->has_caching_ctl) {
9762                 down_write(&root->fs_info->commit_root_sem);
9763                 if (!caching_ctl) {
9764                         struct btrfs_caching_control *ctl;
9765
9766                         list_for_each_entry(ctl,
9767                                     &root->fs_info->caching_block_groups, list)
9768                                 if (ctl->block_group == block_group) {
9769                                         caching_ctl = ctl;
9770                                         atomic_inc(&caching_ctl->count);
9771                                         break;
9772                                 }
9773                 }
9774                 if (caching_ctl)
9775                         list_del_init(&caching_ctl->list);
9776                 up_write(&root->fs_info->commit_root_sem);
9777                 if (caching_ctl) {
9778                         /* Once for the caching bgs list and once for us. */
9779                         put_caching_control(caching_ctl);
9780                         put_caching_control(caching_ctl);
9781                 }
9782         }
9783
9784         spin_lock(&trans->transaction->dirty_bgs_lock);
9785         WARN_ON(!list_empty(&block_group->dirty_list));
9786         WARN_ON(!list_empty(&block_group->io_list));
9791         spin_unlock(&trans->transaction->dirty_bgs_lock);
9792         btrfs_remove_free_space_cache(block_group);
9793
9794         spin_lock(&block_group->space_info->lock);
9795         list_del_init(&block_group->ro_list);
9796
9797         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9798                 WARN_ON(block_group->space_info->total_bytes
9799                         < block_group->key.offset);
9800                 WARN_ON(block_group->space_info->bytes_readonly
9801                         < block_group->key.offset);
9802                 WARN_ON(block_group->space_info->disk_total
9803                         < block_group->key.offset * factor);
9804         }
9805         block_group->space_info->total_bytes -= block_group->key.offset;
9806         block_group->space_info->bytes_readonly -= block_group->key.offset;
9807         block_group->space_info->disk_total -= block_group->key.offset * factor;
9808
9809         spin_unlock(&block_group->space_info->lock);
9810
9811         memcpy(&key, &block_group->key, sizeof(key));
9812
9813         lock_chunks(root);
9814         if (!list_empty(&em->list)) {
9815                 /* We're in the transaction->pending_chunks list. */
9816                 free_extent_map(em);
9817         }
9818         spin_lock(&block_group->lock);
9819         block_group->removed = 1;
9820         /*
9821          * At this point trimming can't start on this block group, because we
9822          * removed the block group from the tree fs_info->block_group_cache_tree
9823          * so no one can find it anymore, and even if someone already got this
9824          * block group before we removed it from the rbtree, they have already
9825          * incremented block_group->trimming - if they didn't, they won't find
9826          * any free space entries because we already removed them all when we
9827          * called btrfs_remove_free_space_cache().
9828          *
9829          * And we must not remove the extent map from the fs_info->mapping_tree
9830          * to prevent the same logical address range and physical device space
9831          * ranges from being reused for a new block group. This is because our
9832          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9833          * completely transactionless, so while it is trimming a range the
9834          * currently running transaction might finish and a new one start,
9835          * allowing for new block groups to be created that can reuse the same
9836          * physical device locations unless we take this special care.
9837          */
9838         remove_em = (atomic_read(&block_group->trimming) == 0);
9839         /*
9840          * Make sure a trimmer task always sees the em in the pinned_chunks list
9841          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9842          * before checking block_group->removed).
9843          */
9844         if (!remove_em) {
9845                 /*
9846                  * Our em might be in trans->transaction->pending_chunks which
9847                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9848                  * and so is the fs_info->pinned_chunks list.
9849                  *
9850                  * So at this point we must be holding the chunk_mutex to avoid
9851                  * any races with chunk allocation (more specifically at
9852                  * volumes.c:contains_pending_extent()), to ensure it always
9853                  * sees the em, either in the pending_chunks list or in the
9854                  * pinned_chunks list.
9855                  */
9856                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9857         }
9858         spin_unlock(&block_group->lock);
9859
9860         if (remove_em) {
9861                 struct extent_map_tree *em_tree;
9862
9863                 em_tree = &root->fs_info->mapping_tree.map_tree;
9864                 write_lock(&em_tree->lock);
9865                 /*
9866                  * The em might be in the pending_chunks list, so make sure the
9867                  * chunk mutex is locked, since remove_extent_mapping() will
9868                  * delete us from that list.
9869                  */
9870                 remove_extent_mapping(em_tree, em);
9871                 write_unlock(&em_tree->lock);
9872                 /* once for the tree */
9873                 free_extent_map(em);
9874         }
9875
9876         unlock_chunks(root);
9877
9878         btrfs_put_block_group(block_group);
9879         btrfs_put_block_group(block_group);
9880
9881         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9882         if (ret > 0)
9883                 ret = -EIO;
9884         if (ret < 0)
9885                 goto out;
9886
9887         ret = btrfs_del_item(trans, root, path);
9888 out:
9889         btrfs_free_path(path);
9890         return ret;
9891 }
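
/*
 * For reference, the trimming side of the ->removed / ->trimming handshake
 * used above lives in free-space-cache.c.  A sketch of that counterpart:
 *
 *        spin_lock(&block_group->lock);
 *        if (block_group->removed) {
 *                spin_unlock(&block_group->lock);
 *                return 0;
 *        }
 *        atomic_inc(&block_group->trimming);
 *        spin_unlock(&block_group->lock);
 *        ...
 *        (trim, then drop ->trimming; the task that brings it back to zero
 *         is responsible for removing the pinned extent map)
 */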
9892
9893 /*
9894  * Process the unused_bgs list and remove any block groups that no longer
9895  * have any allocated space inside them.
9896  */
9897 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9898 {
9899         struct btrfs_block_group_cache *block_group;
9900         struct btrfs_space_info *space_info;
9901         struct btrfs_root *root = fs_info->extent_root;
9902         struct btrfs_trans_handle *trans;
9903         int ret = 0;
9904
9905         if (!fs_info->open)
9906                 return;
9907
9908         spin_lock(&fs_info->unused_bgs_lock);
9909         while (!list_empty(&fs_info->unused_bgs)) {
9910                 u64 start, end;
9911
9912                 block_group = list_first_entry(&fs_info->unused_bgs,
9913                                                struct btrfs_block_group_cache,
9914                                                bg_list);
9915                 space_info = block_group->space_info;
9916                 list_del_init(&block_group->bg_list);
9917                 if (ret || btrfs_mixed_space_info(space_info)) {
9918                         btrfs_put_block_group(block_group);
9919                         continue;
9920                 }
9921                 spin_unlock(&fs_info->unused_bgs_lock);
9922
9923                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
9924
9925                 /* Don't want to race with allocators so take the groups_sem */
9926                 down_write(&space_info->groups_sem);
9927                 spin_lock(&block_group->lock);
9928                 if (block_group->reserved ||
9929                     btrfs_block_group_used(&block_group->item) ||
9930                     block_group->ro) {
9931                         /*
9932                          * We want to bail if we made new allocations or have
9933                          * outstanding allocations in this block group.  We do
9934                          * the ro check in case balance is currently acting on
9935                          * this block group.
9936                          */
9937                         spin_unlock(&block_group->lock);
9938                         up_write(&space_info->groups_sem);
9939                         goto next;
9940                 }
9941                 spin_unlock(&block_group->lock);
9942
9943                 /* We don't want to force the issue, only flip if it's ok. */
9944                 ret = set_block_group_ro(block_group, 0);
9945                 up_write(&space_info->groups_sem);
9946                 if (ret < 0) {
9947                         ret = 0;
9948                         goto next;
9949                 }
9950
9951                 /*
9952                  * Want to do this before we do anything else so we can recover
9953                  * properly if we fail to join the transaction.
9954                  */
9955                 /* 1 for btrfs_orphan_reserve_metadata() */
9956                 trans = btrfs_start_transaction(root, 1);
9957                 if (IS_ERR(trans)) {
9958                         btrfs_set_block_group_rw(root, block_group);
9959                         ret = PTR_ERR(trans);
9960                         goto next;
9961                 }
9962
9963                 /*
9964                  * We could have pending pinned extents for this block group,
9965                  * just delete them, we don't care about them anymore.
9966                  */
9967                 start = block_group->key.objectid;
9968                 end = start + block_group->key.offset - 1;
9969                 /*
9970                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
9971                  * btrfs_finish_extent_commit(). If we are at transaction N,
9972                  * another task might be running finish_extent_commit() for the
9973                  * previous transaction N - 1, and have seen a range belonging
9974                  * to the block group in freed_extents[] before we were able to
9975                  * clear the whole block group range from freed_extents[]. This
9976                  * means that task can look up the block group after we
9977                  * unpinned it from freed_extents[] and removed it, leading to
9978                  * a BUG_ON() at btrfs_unpin_extent_range().
9979                  */
9980                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9981                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9982                                   EXTENT_DIRTY, GFP_NOFS);
9983                 if (ret) {
9984                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9985                         btrfs_set_block_group_rw(root, block_group);
9986                         goto end_trans;
9987                 }
9988                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9989                                   EXTENT_DIRTY, GFP_NOFS);
9990                 if (ret) {
9991                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9992                         btrfs_set_block_group_rw(root, block_group);
9993                         goto end_trans;
9994                 }
9995                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9996
9997                 /* Reset pinned so btrfs_put_block_group doesn't complain */
9998                 spin_lock(&space_info->lock);
9999                 spin_lock(&block_group->lock);
10000
10001                 space_info->bytes_pinned -= block_group->pinned;
10002                 space_info->bytes_readonly += block_group->pinned;
10003                 percpu_counter_add(&space_info->total_bytes_pinned,
10004                                    -block_group->pinned);
10005                 block_group->pinned = 0;
10006
10007                 spin_unlock(&block_group->lock);
10008                 spin_unlock(&space_info->lock);
10009
10010                 /*
10011                  * btrfs_remove_chunk() will abort the transaction if things go
10012                  * horribly wrong.
10013                  */
10014                 ret = btrfs_remove_chunk(trans, root,
10015                                          block_group->key.objectid);
10016 end_trans:
10017                 btrfs_end_transaction(trans, root);
10018 next:
10019                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10020                 btrfs_put_block_group(block_group);
10021                 spin_lock(&fs_info->unused_bgs_lock);
10022         }
10023         spin_unlock(&fs_info->unused_bgs_lock);
10024 }
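
/*
 * btrfs_delete_unused_bgs() is driven from the cleaner kthread (see
 * disk-io.c), which calls it periodically while the filesystem is open.
 */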
10025
10026 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10027 {
10028         struct btrfs_space_info *space_info;
10029         struct btrfs_super_block *disk_super;
10030         u64 features;
10031         u64 flags;
10032         int mixed = 0;
10033         int ret;
10034
10035         disk_super = fs_info->super_copy;
10036         if (!btrfs_super_root(disk_super))
10037                 return 1;
10038
10039         features = btrfs_super_incompat_flags(disk_super);
10040         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10041                 mixed = 1;
10042
10043         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10044         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10045         if (ret)
10046                 goto out;
10047
10048         if (mixed) {
10049                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10050                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10051         } else {
10052                 flags = BTRFS_BLOCK_GROUP_METADATA;
10053                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10054                 if (ret)
10055                         goto out;
10056
10057                 flags = BTRFS_BLOCK_GROUP_DATA;
10058                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10059         }
10060 out:
10061         return ret;
10062 }
10063
10064 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10065 {
10066         return unpin_extent_range(root, start, end, false);
10067 }
10068
10069 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10070 {
10071         struct btrfs_fs_info *fs_info = root->fs_info;
10072         struct btrfs_block_group_cache *cache = NULL;
10073         u64 group_trimmed;
10074         u64 start;
10075         u64 end;
10076         u64 trimmed = 0;
10077         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10078         int ret = 0;
10079
10080         /*
10081          * Try to trim all FS space; the first block group may start at a non-zero offset.
10082          */
10083         if (range->len == total_bytes)
10084                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10085         else
10086                 cache = btrfs_lookup_block_group(fs_info, range->start);
10087
10088         while (cache) {
10089                 if (cache->key.objectid >= (range->start + range->len)) {
10090                         btrfs_put_block_group(cache);
10091                         break;
10092                 }
10093
10094                 start = max(range->start, cache->key.objectid);
10095                 end = min(range->start + range->len,
10096                                 cache->key.objectid + cache->key.offset);
10097
10098                 if (end - start >= range->minlen) {
10099                         if (!block_group_cache_done(cache)) {
10100                                 ret = cache_block_group(cache, 0);
10101                                 if (ret) {
10102                                         btrfs_put_block_group(cache);
10103                                         break;
10104                                 }
10105                                 ret = wait_block_group_cache_done(cache);
10106                                 if (ret) {
10107                                         btrfs_put_block_group(cache);
10108                                         break;
10109                                 }
10110                         }
10111                         ret = btrfs_trim_block_group(cache,
10112                                                      &group_trimmed,
10113                                                      start,
10114                                                      end,
10115                                                      range->minlen);
10116
10117                         trimmed += group_trimmed;
10118                         if (ret) {
10119                                 btrfs_put_block_group(cache);
10120                                 break;
10121                         }
10122                 }
10123
10124                 cache = next_block_group(fs_info->tree_root, cache);
10125         }
10126
10127         range->len = trimmed;
10128         return ret;
10129 }
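
/*
 * btrfs_trim_fs() is reached through the generic FITRIM ioctl
 * (btrfs_ioctl_fitrim()).  A minimal userspace sketch, error handling
 * omitted:
 *
 *        #include <fcntl.h>
 *        #include <limits.h>
 *        #include <linux/fs.h>
 *        #include <sys/ioctl.h>
 *
 *        struct fstrim_range range = {
 *                .start  = 0,
 *                .len    = ULLONG_MAX,
 *                .minlen = 0,
 *        };
 *        int fd = open("/mnt", O_RDONLY);
 *        ioctl(fd, FITRIM, &range);
 *        (on return, range.len holds the number of bytes trimmed)
 */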
10130
10131 /*
10132  * btrfs_{start,end}_write_no_snapshoting() are similar to
10133  * mnt_{want,drop}_write(). They are used to prevent some tasks from writing
10134  * data into the page cache through nocow before the subvolume is snapshotted
10135  * and then flushing it to disk only after the snapshot is created, and to
10136  * prevent operations that would make the snapshot inconsistent while it is
10137  * being created (writes followed by expanding truncates, for example).
10138  */
10139 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10140 {
10141         percpu_counter_dec(&root->subv_writers->counter);
10142         /*
10143          * Make sure counter is updated before we wake up
10144          * waiters.
10145          */
10146         smp_mb();
10147         if (waitqueue_active(&root->subv_writers->wait))
10148                 wake_up(&root->subv_writers->wait);
10149 }
10150
10151 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10152 {
10153         if (atomic_read(&root->will_be_snapshoted))
10154                 return 0;
10155
10156         percpu_counter_inc(&root->subv_writers->counter);
10157         /*
10158          * Make sure counter is updated before we check for snapshot creation.
10159          */
10160         smp_mb();
10161         if (atomic_read(&root->will_be_snapshoted)) {
10162                 btrfs_end_write_no_snapshoting(root);
10163                 return 0;
10164         }
10165         return 1;
10166 }
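
/*
 * A typical caller pairs these two as follows (a sketch; the nocow write
 * path is the main user):
 *
 *        if (!btrfs_start_write_no_snapshoting(root)) {
 *                (a snapshot is being created, fall back to cow)
 *        } else {
 *                (do the nocow write)
 *                btrfs_end_write_no_snapshoting(root);
 *        }
 */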