btrfs: btrfs_issue_discard ensure offset/length are aligned to sector boundaries
[cascardo/linux.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43  * control flags for do_chunk_alloc's force field
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try and allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one
54  *
55  */
56 enum {
57         CHUNK_ALLOC_NO_FORCE = 0,
58         CHUNK_ALLOC_LIMITED = 1,
59         CHUNK_ALLOC_FORCE = 2,
60 };
61
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72         RESERVE_FREE = 0,
73         RESERVE_ALLOC = 1,
74         RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
76
77 static int update_block_group(struct btrfs_trans_handle *trans,
78                               struct btrfs_root *root, u64 bytenr,
79                               u64 num_bytes, int alloc);
80 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81                                 struct btrfs_root *root,
82                                 struct btrfs_delayed_ref_node *node, u64 parent,
83                                 u64 root_objectid, u64 owner_objectid,
84                                 u64 owner_offset, int refs_to_drop,
85                                 struct btrfs_delayed_extent_op *extra_op);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87                                     struct extent_buffer *leaf,
88                                     struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90                                       struct btrfs_root *root,
91                                       u64 parent, u64 root_objectid,
92                                       u64 flags, u64 owner, u64 offset,
93                                       struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95                                      struct btrfs_root *root,
96                                      u64 parent, u64 root_objectid,
97                                      u64 flags, struct btrfs_disk_key *key,
98                                      int level, struct btrfs_key *ins,
99                                      int no_quota);
100 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
101                           struct btrfs_root *extent_root, u64 flags,
102                           int force);
103 static int find_next_key(struct btrfs_path *path, int level,
104                          struct btrfs_key *key);
105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106                             int dump_block_groups);
107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108                                        u64 num_bytes, int reserve,
109                                        int delalloc);
110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111                                u64 num_bytes);
112 int btrfs_pin_extent(struct btrfs_root *root,
113                      u64 bytenr, u64 num_bytes, int reserved);
114
115 static noinline int
116 block_group_cache_done(struct btrfs_block_group_cache *cache)
117 {
118         smp_mb();
119         return cache->cached == BTRFS_CACHE_FINISHED ||
120                 cache->cached == BTRFS_CACHE_ERROR;
121 }
122
123 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 {
125         return (cache->flags & bits) == bits;
126 }
127
128 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 {
130         atomic_inc(&cache->count);
131 }
132
133 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 {
135         if (atomic_dec_and_test(&cache->count)) {
136                 WARN_ON(cache->pinned > 0);
137                 WARN_ON(cache->reserved > 0);
138                 kfree(cache->free_space_ctl);
139                 kfree(cache);
140         }
141 }
142
143 /*
144  * this adds the block group to the fs_info rb tree for the block group
145  * cache
146  */
147 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
148                                 struct btrfs_block_group_cache *block_group)
149 {
150         struct rb_node **p;
151         struct rb_node *parent = NULL;
152         struct btrfs_block_group_cache *cache;
153
154         spin_lock(&info->block_group_cache_lock);
155         p = &info->block_group_cache_tree.rb_node;
156
157         while (*p) {
158                 parent = *p;
159                 cache = rb_entry(parent, struct btrfs_block_group_cache,
160                                  cache_node);
161                 if (block_group->key.objectid < cache->key.objectid) {
162                         p = &(*p)->rb_left;
163                 } else if (block_group->key.objectid > cache->key.objectid) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         spin_unlock(&info->block_group_cache_lock);
167                         return -EEXIST;
168                 }
169         }
170
171         rb_link_node(&block_group->cache_node, parent, p);
172         rb_insert_color(&block_group->cache_node,
173                         &info->block_group_cache_tree);
174
175         if (info->first_logical_byte > block_group->key.objectid)
176                 info->first_logical_byte = block_group->key.objectid;
177
178         spin_unlock(&info->block_group_cache_lock);
179
180         return 0;
181 }
182
183 /*
184  * This will return the block group at or after bytenr if contains is 0, else
185  * it will return the block group that contains the bytenr
186  */
187 static struct btrfs_block_group_cache *
188 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
189                               int contains)
190 {
191         struct btrfs_block_group_cache *cache, *ret = NULL;
192         struct rb_node *n;
193         u64 end, start;
194
195         spin_lock(&info->block_group_cache_lock);
196         n = info->block_group_cache_tree.rb_node;
197
198         while (n) {
199                 cache = rb_entry(n, struct btrfs_block_group_cache,
200                                  cache_node);
201                 end = cache->key.objectid + cache->key.offset - 1;
202                 start = cache->key.objectid;
203
204                 if (bytenr < start) {
205                         if (!contains && (!ret || start < ret->key.objectid))
206                                 ret = cache;
207                         n = n->rb_left;
208                 } else if (bytenr > start) {
209                         if (contains && bytenr <= end) {
210                                 ret = cache;
211                                 break;
212                         }
213                         n = n->rb_right;
214                 } else {
215                         ret = cache;
216                         break;
217                 }
218         }
219         if (ret) {
220                 btrfs_get_block_group(ret);
221                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
222                         info->first_logical_byte = ret->key.objectid;
223         }
224         spin_unlock(&info->block_group_cache_lock);
225
226         return ret;
227 }
228
229 static int add_excluded_extent(struct btrfs_root *root,
230                                u64 start, u64 num_bytes)
231 {
232         u64 end = start + num_bytes - 1;
233         set_extent_bits(&root->fs_info->freed_extents[0],
234                         start, end, EXTENT_UPTODATE, GFP_NOFS);
235         set_extent_bits(&root->fs_info->freed_extents[1],
236                         start, end, EXTENT_UPTODATE, GFP_NOFS);
237         return 0;
238 }
239
240 static void free_excluded_extents(struct btrfs_root *root,
241                                   struct btrfs_block_group_cache *cache)
242 {
243         u64 start, end;
244
245         start = cache->key.objectid;
246         end = start + cache->key.offset - 1;
247
248         clear_extent_bits(&root->fs_info->freed_extents[0],
249                           start, end, EXTENT_UPTODATE, GFP_NOFS);
250         clear_extent_bits(&root->fs_info->freed_extents[1],
251                           start, end, EXTENT_UPTODATE, GFP_NOFS);
252 }
253
254 static int exclude_super_stripes(struct btrfs_root *root,
255                                  struct btrfs_block_group_cache *cache)
256 {
257         u64 bytenr;
258         u64 *logical;
259         int stripe_len;
260         int i, nr, ret;
261
262         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
263                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
264                 cache->bytes_super += stripe_len;
265                 ret = add_excluded_extent(root, cache->key.objectid,
266                                           stripe_len);
267                 if (ret)
268                         return ret;
269         }
270
271         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
272                 bytenr = btrfs_sb_offset(i);
273                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
274                                        cache->key.objectid, bytenr,
275                                        0, &logical, &nr, &stripe_len);
276                 if (ret)
277                         return ret;
278
279                 while (nr--) {
280                         u64 start, len;
281
282                         if (logical[nr] > cache->key.objectid +
283                             cache->key.offset)
284                                 continue;
285
286                         if (logical[nr] + stripe_len <= cache->key.objectid)
287                                 continue;
288
289                         start = logical[nr];
290                         if (start < cache->key.objectid) {
291                                 start = cache->key.objectid;
292                                 len = (logical[nr] + stripe_len) - start;
293                         } else {
294                                 len = min_t(u64, stripe_len,
295                                             cache->key.objectid +
296                                             cache->key.offset - start);
297                         }
298
299                         cache->bytes_super += len;
300                         ret = add_excluded_extent(root, start, len);
301                         if (ret) {
302                                 kfree(logical);
303                                 return ret;
304                         }
305                 }
306
307                 kfree(logical);
308         }
309         return 0;
310 }
311
312 static struct btrfs_caching_control *
313 get_caching_control(struct btrfs_block_group_cache *cache)
314 {
315         struct btrfs_caching_control *ctl;
316
317         spin_lock(&cache->lock);
318         if (!cache->caching_ctl) {
319                 spin_unlock(&cache->lock);
320                 return NULL;
321         }
322
323         ctl = cache->caching_ctl;
324         atomic_inc(&ctl->count);
325         spin_unlock(&cache->lock);
326         return ctl;
327 }
328
329 static void put_caching_control(struct btrfs_caching_control *ctl)
330 {
331         if (atomic_dec_and_test(&ctl->count))
332                 kfree(ctl);
333 }
334
335 /*
336  * this is only called by cache_block_group, since we could have freed extents
337  * we need to check the pinned_extents for any extents that can't be used yet
338  * since their free space will be released as soon as the transaction commits.
339  */
340 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
341                               struct btrfs_fs_info *info, u64 start, u64 end)
342 {
343         u64 extent_start, extent_end, size, total_added = 0;
344         int ret;
345
346         while (start < end) {
347                 ret = find_first_extent_bit(info->pinned_extents, start,
348                                             &extent_start, &extent_end,
349                                             EXTENT_DIRTY | EXTENT_UPTODATE,
350                                             NULL);
351                 if (ret)
352                         break;
353
354                 if (extent_start <= start) {
355                         start = extent_end + 1;
356                 } else if (extent_start > start && extent_start < end) {
357                         size = extent_start - start;
358                         total_added += size;
359                         ret = btrfs_add_free_space(block_group, start,
360                                                    size);
361                         BUG_ON(ret); /* -ENOMEM or logic error */
362                         start = extent_end + 1;
363                 } else {
364                         break;
365                 }
366         }
367
368         if (start < end) {
369                 size = end - start;
370                 total_added += size;
371                 ret = btrfs_add_free_space(block_group, start, size);
372                 BUG_ON(ret); /* -ENOMEM or logic error */
373         }
374
375         return total_added;
376 }
377
378 static noinline void caching_thread(struct btrfs_work *work)
379 {
380         struct btrfs_block_group_cache *block_group;
381         struct btrfs_fs_info *fs_info;
382         struct btrfs_caching_control *caching_ctl;
383         struct btrfs_root *extent_root;
384         struct btrfs_path *path;
385         struct extent_buffer *leaf;
386         struct btrfs_key key;
387         u64 total_found = 0;
388         u64 last = 0;
389         u32 nritems;
390         int ret = -ENOMEM;
391
392         caching_ctl = container_of(work, struct btrfs_caching_control, work);
393         block_group = caching_ctl->block_group;
394         fs_info = block_group->fs_info;
395         extent_root = fs_info->extent_root;
396
397         path = btrfs_alloc_path();
398         if (!path)
399                 goto out;
400
401         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
402
403         /*
404          * We don't want to deadlock with somebody trying to allocate a new
405          * extent for the extent root while also trying to search the extent
406          * root to add free space.  So we skip locking and search the commit
407          * root, since its read-only
408          */
409         path->skip_locking = 1;
410         path->search_commit_root = 1;
411         path->reada = 1;
412
413         key.objectid = last;
414         key.offset = 0;
415         key.type = BTRFS_EXTENT_ITEM_KEY;
416 again:
417         mutex_lock(&caching_ctl->mutex);
418         /* need to make sure the commit_root doesn't disappear */
419         down_read(&fs_info->commit_root_sem);
420
421 next:
422         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
423         if (ret < 0)
424                 goto err;
425
426         leaf = path->nodes[0];
427         nritems = btrfs_header_nritems(leaf);
428
429         while (1) {
430                 if (btrfs_fs_closing(fs_info) > 1) {
431                         last = (u64)-1;
432                         break;
433                 }
434
435                 if (path->slots[0] < nritems) {
436                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
437                 } else {
438                         ret = find_next_key(path, 0, &key);
439                         if (ret)
440                                 break;
441
442                         if (need_resched() ||
443                             rwsem_is_contended(&fs_info->commit_root_sem)) {
444                                 caching_ctl->progress = last;
445                                 btrfs_release_path(path);
446                                 up_read(&fs_info->commit_root_sem);
447                                 mutex_unlock(&caching_ctl->mutex);
448                                 cond_resched();
449                                 goto again;
450                         }
451
452                         ret = btrfs_next_leaf(extent_root, path);
453                         if (ret < 0)
454                                 goto err;
455                         if (ret)
456                                 break;
457                         leaf = path->nodes[0];
458                         nritems = btrfs_header_nritems(leaf);
459                         continue;
460                 }
461
462                 if (key.objectid < last) {
463                         key.objectid = last;
464                         key.offset = 0;
465                         key.type = BTRFS_EXTENT_ITEM_KEY;
466
467                         caching_ctl->progress = last;
468                         btrfs_release_path(path);
469                         goto next;
470                 }
471
472                 if (key.objectid < block_group->key.objectid) {
473                         path->slots[0]++;
474                         continue;
475                 }
476
477                 if (key.objectid >= block_group->key.objectid +
478                     block_group->key.offset)
479                         break;
480
481                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
482                     key.type == BTRFS_METADATA_ITEM_KEY) {
483                         total_found += add_new_free_space(block_group,
484                                                           fs_info, last,
485                                                           key.objectid);
486                         if (key.type == BTRFS_METADATA_ITEM_KEY)
487                                 last = key.objectid +
488                                         fs_info->tree_root->nodesize;
489                         else
490                                 last = key.objectid + key.offset;
491
492                         if (total_found > (1024 * 1024 * 2)) {
493                                 total_found = 0;
494                                 wake_up(&caching_ctl->wait);
495                         }
496                 }
497                 path->slots[0]++;
498         }
499         ret = 0;
500
501         total_found += add_new_free_space(block_group, fs_info, last,
502                                           block_group->key.objectid +
503                                           block_group->key.offset);
504         caching_ctl->progress = (u64)-1;
505
506         spin_lock(&block_group->lock);
507         block_group->caching_ctl = NULL;
508         block_group->cached = BTRFS_CACHE_FINISHED;
509         spin_unlock(&block_group->lock);
510
511 err:
512         btrfs_free_path(path);
513         up_read(&fs_info->commit_root_sem);
514
515         free_excluded_extents(extent_root, block_group);
516
517         mutex_unlock(&caching_ctl->mutex);
518 out:
519         if (ret) {
520                 spin_lock(&block_group->lock);
521                 block_group->caching_ctl = NULL;
522                 block_group->cached = BTRFS_CACHE_ERROR;
523                 spin_unlock(&block_group->lock);
524         }
525         wake_up(&caching_ctl->wait);
526
527         put_caching_control(caching_ctl);
528         btrfs_put_block_group(block_group);
529 }
530
531 static int cache_block_group(struct btrfs_block_group_cache *cache,
532                              int load_cache_only)
533 {
534         DEFINE_WAIT(wait);
535         struct btrfs_fs_info *fs_info = cache->fs_info;
536         struct btrfs_caching_control *caching_ctl;
537         int ret = 0;
538
539         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
540         if (!caching_ctl)
541                 return -ENOMEM;
542
543         INIT_LIST_HEAD(&caching_ctl->list);
544         mutex_init(&caching_ctl->mutex);
545         init_waitqueue_head(&caching_ctl->wait);
546         caching_ctl->block_group = cache;
547         caching_ctl->progress = cache->key.objectid;
548         atomic_set(&caching_ctl->count, 1);
549         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
550                         caching_thread, NULL, NULL);
551
552         spin_lock(&cache->lock);
553         /*
554          * This should be a rare occasion, but this could happen I think in the
555          * case where one thread starts to load the space cache info, and then
556          * some other thread starts a transaction commit which tries to do an
557          * allocation while the other thread is still loading the space cache
558          * info.  The previous loop should have kept us from choosing this block
559          * group, but if we've moved to the state where we will wait on caching
560          * block groups we need to first check if we're doing a fast load here,
561          * so we can wait for it to finish, otherwise we could end up allocating
562          * from a block group who's cache gets evicted for one reason or
563          * another.
564          */
565         while (cache->cached == BTRFS_CACHE_FAST) {
566                 struct btrfs_caching_control *ctl;
567
568                 ctl = cache->caching_ctl;
569                 atomic_inc(&ctl->count);
570                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
571                 spin_unlock(&cache->lock);
572
573                 schedule();
574
575                 finish_wait(&ctl->wait, &wait);
576                 put_caching_control(ctl);
577                 spin_lock(&cache->lock);
578         }
579
580         if (cache->cached != BTRFS_CACHE_NO) {
581                 spin_unlock(&cache->lock);
582                 kfree(caching_ctl);
583                 return 0;
584         }
585         WARN_ON(cache->caching_ctl);
586         cache->caching_ctl = caching_ctl;
587         cache->cached = BTRFS_CACHE_FAST;
588         spin_unlock(&cache->lock);
589
590         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
591                 mutex_lock(&caching_ctl->mutex);
592                 ret = load_free_space_cache(fs_info, cache);
593
594                 spin_lock(&cache->lock);
595                 if (ret == 1) {
596                         cache->caching_ctl = NULL;
597                         cache->cached = BTRFS_CACHE_FINISHED;
598                         cache->last_byte_to_unpin = (u64)-1;
599                         caching_ctl->progress = (u64)-1;
600                 } else {
601                         if (load_cache_only) {
602                                 cache->caching_ctl = NULL;
603                                 cache->cached = BTRFS_CACHE_NO;
604                         } else {
605                                 cache->cached = BTRFS_CACHE_STARTED;
606                                 cache->has_caching_ctl = 1;
607                         }
608                 }
609                 spin_unlock(&cache->lock);
610                 mutex_unlock(&caching_ctl->mutex);
611
612                 wake_up(&caching_ctl->wait);
613                 if (ret == 1) {
614                         put_caching_control(caching_ctl);
615                         free_excluded_extents(fs_info->extent_root, cache);
616                         return 0;
617                 }
618         } else {
619                 /*
620                  * We are not going to do the fast caching, set cached to the
621                  * appropriate value and wakeup any waiters.
622                  */
623                 spin_lock(&cache->lock);
624                 if (load_cache_only) {
625                         cache->caching_ctl = NULL;
626                         cache->cached = BTRFS_CACHE_NO;
627                 } else {
628                         cache->cached = BTRFS_CACHE_STARTED;
629                         cache->has_caching_ctl = 1;
630                 }
631                 spin_unlock(&cache->lock);
632                 wake_up(&caching_ctl->wait);
633         }
634
635         if (load_cache_only) {
636                 put_caching_control(caching_ctl);
637                 return 0;
638         }
639
640         down_write(&fs_info->commit_root_sem);
641         atomic_inc(&caching_ctl->count);
642         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
643         up_write(&fs_info->commit_root_sem);
644
645         btrfs_get_block_group(cache);
646
647         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
648
649         return ret;
650 }
651
652 /*
653  * return the block group that starts at or after bytenr
654  */
655 static struct btrfs_block_group_cache *
656 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
657 {
658         struct btrfs_block_group_cache *cache;
659
660         cache = block_group_cache_tree_search(info, bytenr, 0);
661
662         return cache;
663 }
664
665 /*
666  * return the block group that contains the given bytenr
667  */
668 struct btrfs_block_group_cache *btrfs_lookup_block_group(
669                                                  struct btrfs_fs_info *info,
670                                                  u64 bytenr)
671 {
672         struct btrfs_block_group_cache *cache;
673
674         cache = block_group_cache_tree_search(info, bytenr, 1);
675
676         return cache;
677 }
678
679 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
680                                                   u64 flags)
681 {
682         struct list_head *head = &info->space_info;
683         struct btrfs_space_info *found;
684
685         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
686
687         rcu_read_lock();
688         list_for_each_entry_rcu(found, head, list) {
689                 if (found->flags & flags) {
690                         rcu_read_unlock();
691                         return found;
692                 }
693         }
694         rcu_read_unlock();
695         return NULL;
696 }
697
698 /*
699  * after adding space to the filesystem, we need to clear the full flags
700  * on all the space infos.
701  */
702 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
703 {
704         struct list_head *head = &info->space_info;
705         struct btrfs_space_info *found;
706
707         rcu_read_lock();
708         list_for_each_entry_rcu(found, head, list)
709                 found->full = 0;
710         rcu_read_unlock();
711 }
712
713 /* simple helper to search for an existing data extent at a given offset */
714 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
715 {
716         int ret;
717         struct btrfs_key key;
718         struct btrfs_path *path;
719
720         path = btrfs_alloc_path();
721         if (!path)
722                 return -ENOMEM;
723
724         key.objectid = start;
725         key.offset = len;
726         key.type = BTRFS_EXTENT_ITEM_KEY;
727         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
728                                 0, 0);
729         btrfs_free_path(path);
730         return ret;
731 }
732
733 /*
734  * helper function to lookup reference count and flags of a tree block.
735  *
736  * the head node for delayed ref is used to store the sum of all the
737  * reference count modifications queued up in the rbtree. the head
738  * node may also store the extent flags to set. This way you can check
739  * to see what the reference count and extent flags would be if all of
740  * the delayed refs are not processed.
741  */
742 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
743                              struct btrfs_root *root, u64 bytenr,
744                              u64 offset, int metadata, u64 *refs, u64 *flags)
745 {
746         struct btrfs_delayed_ref_head *head;
747         struct btrfs_delayed_ref_root *delayed_refs;
748         struct btrfs_path *path;
749         struct btrfs_extent_item *ei;
750         struct extent_buffer *leaf;
751         struct btrfs_key key;
752         u32 item_size;
753         u64 num_refs;
754         u64 extent_flags;
755         int ret;
756
757         /*
758          * If we don't have skinny metadata, don't bother doing anything
759          * different
760          */
761         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
762                 offset = root->nodesize;
763                 metadata = 0;
764         }
765
766         path = btrfs_alloc_path();
767         if (!path)
768                 return -ENOMEM;
769
770         if (!trans) {
771                 path->skip_locking = 1;
772                 path->search_commit_root = 1;
773         }
774
775 search_again:
776         key.objectid = bytenr;
777         key.offset = offset;
778         if (metadata)
779                 key.type = BTRFS_METADATA_ITEM_KEY;
780         else
781                 key.type = BTRFS_EXTENT_ITEM_KEY;
782
783         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
784                                 &key, path, 0, 0);
785         if (ret < 0)
786                 goto out_free;
787
788         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
789                 if (path->slots[0]) {
790                         path->slots[0]--;
791                         btrfs_item_key_to_cpu(path->nodes[0], &key,
792                                               path->slots[0]);
793                         if (key.objectid == bytenr &&
794                             key.type == BTRFS_EXTENT_ITEM_KEY &&
795                             key.offset == root->nodesize)
796                                 ret = 0;
797                 }
798         }
799
800         if (ret == 0) {
801                 leaf = path->nodes[0];
802                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
803                 if (item_size >= sizeof(*ei)) {
804                         ei = btrfs_item_ptr(leaf, path->slots[0],
805                                             struct btrfs_extent_item);
806                         num_refs = btrfs_extent_refs(leaf, ei);
807                         extent_flags = btrfs_extent_flags(leaf, ei);
808                 } else {
809 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
810                         struct btrfs_extent_item_v0 *ei0;
811                         BUG_ON(item_size != sizeof(*ei0));
812                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
813                                              struct btrfs_extent_item_v0);
814                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
815                         /* FIXME: this isn't correct for data */
816                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
817 #else
818                         BUG();
819 #endif
820                 }
821                 BUG_ON(num_refs == 0);
822         } else {
823                 num_refs = 0;
824                 extent_flags = 0;
825                 ret = 0;
826         }
827
828         if (!trans)
829                 goto out;
830
831         delayed_refs = &trans->transaction->delayed_refs;
832         spin_lock(&delayed_refs->lock);
833         head = btrfs_find_delayed_ref_head(trans, bytenr);
834         if (head) {
835                 if (!mutex_trylock(&head->mutex)) {
836                         atomic_inc(&head->node.refs);
837                         spin_unlock(&delayed_refs->lock);
838
839                         btrfs_release_path(path);
840
841                         /*
842                          * Mutex was contended, block until it's released and try
843                          * again
844                          */
845                         mutex_lock(&head->mutex);
846                         mutex_unlock(&head->mutex);
847                         btrfs_put_delayed_ref(&head->node);
848                         goto search_again;
849                 }
850                 spin_lock(&head->lock);
851                 if (head->extent_op && head->extent_op->update_flags)
852                         extent_flags |= head->extent_op->flags_to_set;
853                 else
854                         BUG_ON(num_refs == 0);
855
856                 num_refs += head->node.ref_mod;
857                 spin_unlock(&head->lock);
858                 mutex_unlock(&head->mutex);
859         }
860         spin_unlock(&delayed_refs->lock);
861 out:
862         WARN_ON(num_refs == 0);
863         if (refs)
864                 *refs = num_refs;
865         if (flags)
866                 *flags = extent_flags;
867 out_free:
868         btrfs_free_path(path);
869         return ret;
870 }
871
872 /*
873  * Back reference rules.  Back refs have three main goals:
874  *
875  * 1) differentiate between all holders of references to an extent so that
876  *    when a reference is dropped we can make sure it was a valid reference
877  *    before freeing the extent.
878  *
879  * 2) Provide enough information to quickly find the holders of an extent
880  *    if we notice a given block is corrupted or bad.
881  *
882  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
883  *    maintenance.  This is actually the same as #2, but with a slightly
884  *    different use case.
885  *
886  * There are two kinds of back refs. The implicit back refs is optimized
887  * for pointers in non-shared tree blocks. For a given pointer in a block,
888  * back refs of this kind provide information about the block's owner tree
889  * and the pointer's key. These information allow us to find the block by
890  * b-tree searching. The full back refs is for pointers in tree blocks not
891  * referenced by their owner trees. The location of tree block is recorded
892  * in the back refs. Actually the full back refs is generic, and can be
893  * used in all cases the implicit back refs is used. The major shortcoming
894  * of the full back refs is its overhead. Every time a tree block gets
895  * COWed, we have to update back refs entry for all pointers in it.
896  *
897  * For a newly allocated tree block, we use implicit back refs for
898  * pointers in it. This means most tree related operations only involve
899  * implicit back refs. For a tree block created in old transaction, the
900  * only way to drop a reference to it is COW it. So we can detect the
901  * event that tree block loses its owner tree's reference and do the
902  * back refs conversion.
903  *
904  * When a tree block is COW'd through a tree, there are four cases:
905  *
906  * The reference count of the block is one and the tree is the block's
907  * owner tree. Nothing to do in this case.
908  *
909  * The reference count of the block is one and the tree is not the
910  * block's owner tree. In this case, full back refs is used for pointers
911  * in the block. Remove these full back refs, add implicit back refs for
912  * every pointers in the new block.
913  *
914  * The reference count of the block is greater than one and the tree is
915  * the block's owner tree. In this case, implicit back refs is used for
916  * pointers in the block. Add full back refs for every pointers in the
917  * block, increase lower level extents' reference counts. The original
918  * implicit back refs are entailed to the new block.
919  *
920  * The reference count of the block is greater than one and the tree is
921  * not the block's owner tree. Add implicit back refs for every pointer in
922  * the new block, increase lower level extents' reference count.
923  *
924  * Back Reference Key composing:
925  *
926  * The key objectid corresponds to the first byte in the extent,
927  * The key type is used to differentiate between types of back refs.
928  * There are different meanings of the key offset for different types
929  * of back refs.
930  *
931  * File extents can be referenced by:
932  *
933  * - multiple snapshots, subvolumes, or different generations in one subvol
934  * - different files inside a single subvolume
935  * - different offsets inside a file (bookend extents in file.c)
936  *
937  * The extent ref structure for the implicit back refs has fields for:
938  *
939  * - Objectid of the subvolume root
940  * - objectid of the file holding the reference
941  * - original offset in the file
942  * - how many bookend extents
943  *
944  * The key offset for the implicit back refs is hash of the first
945  * three fields.
946  *
947  * The extent ref structure for the full back refs has field for:
948  *
949  * - number of pointers in the tree leaf
950  *
951  * The key offset for the implicit back refs is the first byte of
952  * the tree leaf
953  *
954  * When a file extent is allocated, The implicit back refs is used.
955  * the fields are filled in:
956  *
957  *     (root_key.objectid, inode objectid, offset in file, 1)
958  *
959  * When a file extent is removed file truncation, we find the
960  * corresponding implicit back refs and check the following fields:
961  *
962  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
963  *
964  * Btree extents can be referenced by:
965  *
966  * - Different subvolumes
967  *
968  * Both the implicit back refs and the full back refs for tree blocks
969  * only consist of key. The key offset for the implicit back refs is
970  * objectid of block's owner tree. The key offset for the full back refs
971  * is the first byte of parent block.
972  *
973  * When implicit back refs is used, information about the lowest key and
974  * level of the tree block are required. These information are stored in
975  * tree block info structure.
976  */
977
978 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
979 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
980                                   struct btrfs_root *root,
981                                   struct btrfs_path *path,
982                                   u64 owner, u32 extra_size)
983 {
984         struct btrfs_extent_item *item;
985         struct btrfs_extent_item_v0 *ei0;
986         struct btrfs_extent_ref_v0 *ref0;
987         struct btrfs_tree_block_info *bi;
988         struct extent_buffer *leaf;
989         struct btrfs_key key;
990         struct btrfs_key found_key;
991         u32 new_size = sizeof(*item);
992         u64 refs;
993         int ret;
994
995         leaf = path->nodes[0];
996         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
997
998         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
999         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1000                              struct btrfs_extent_item_v0);
1001         refs = btrfs_extent_refs_v0(leaf, ei0);
1002
1003         if (owner == (u64)-1) {
1004                 while (1) {
1005                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1006                                 ret = btrfs_next_leaf(root, path);
1007                                 if (ret < 0)
1008                                         return ret;
1009                                 BUG_ON(ret > 0); /* Corruption */
1010                                 leaf = path->nodes[0];
1011                         }
1012                         btrfs_item_key_to_cpu(leaf, &found_key,
1013                                               path->slots[0]);
1014                         BUG_ON(key.objectid != found_key.objectid);
1015                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1016                                 path->slots[0]++;
1017                                 continue;
1018                         }
1019                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1020                                               struct btrfs_extent_ref_v0);
1021                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1022                         break;
1023                 }
1024         }
1025         btrfs_release_path(path);
1026
1027         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1028                 new_size += sizeof(*bi);
1029
1030         new_size -= sizeof(*ei0);
1031         ret = btrfs_search_slot(trans, root, &key, path,
1032                                 new_size + extra_size, 1);
1033         if (ret < 0)
1034                 return ret;
1035         BUG_ON(ret); /* Corruption */
1036
1037         btrfs_extend_item(root, path, new_size);
1038
1039         leaf = path->nodes[0];
1040         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1041         btrfs_set_extent_refs(leaf, item, refs);
1042         /* FIXME: get real generation */
1043         btrfs_set_extent_generation(leaf, item, 0);
1044         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1045                 btrfs_set_extent_flags(leaf, item,
1046                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1047                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1048                 bi = (struct btrfs_tree_block_info *)(item + 1);
1049                 /* FIXME: get first key of the block */
1050                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1051                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1052         } else {
1053                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1054         }
1055         btrfs_mark_buffer_dirty(leaf);
1056         return 0;
1057 }
1058 #endif
1059
1060 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1061 {
1062         u32 high_crc = ~(u32)0;
1063         u32 low_crc = ~(u32)0;
1064         __le64 lenum;
1065
1066         lenum = cpu_to_le64(root_objectid);
1067         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1068         lenum = cpu_to_le64(owner);
1069         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1070         lenum = cpu_to_le64(offset);
1071         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1072
1073         return ((u64)high_crc << 31) ^ (u64)low_crc;
1074 }
1075
1076 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1077                                      struct btrfs_extent_data_ref *ref)
1078 {
1079         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1080                                     btrfs_extent_data_ref_objectid(leaf, ref),
1081                                     btrfs_extent_data_ref_offset(leaf, ref));
1082 }
1083
1084 static int match_extent_data_ref(struct extent_buffer *leaf,
1085                                  struct btrfs_extent_data_ref *ref,
1086                                  u64 root_objectid, u64 owner, u64 offset)
1087 {
1088         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1089             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1090             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1091                 return 0;
1092         return 1;
1093 }
1094
1095 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1096                                            struct btrfs_root *root,
1097                                            struct btrfs_path *path,
1098                                            u64 bytenr, u64 parent,
1099                                            u64 root_objectid,
1100                                            u64 owner, u64 offset)
1101 {
1102         struct btrfs_key key;
1103         struct btrfs_extent_data_ref *ref;
1104         struct extent_buffer *leaf;
1105         u32 nritems;
1106         int ret;
1107         int recow;
1108         int err = -ENOENT;
1109
1110         key.objectid = bytenr;
1111         if (parent) {
1112                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1113                 key.offset = parent;
1114         } else {
1115                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1116                 key.offset = hash_extent_data_ref(root_objectid,
1117                                                   owner, offset);
1118         }
1119 again:
1120         recow = 0;
1121         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1122         if (ret < 0) {
1123                 err = ret;
1124                 goto fail;
1125         }
1126
1127         if (parent) {
1128                 if (!ret)
1129                         return 0;
1130 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1131                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1132                 btrfs_release_path(path);
1133                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1134                 if (ret < 0) {
1135                         err = ret;
1136                         goto fail;
1137                 }
1138                 if (!ret)
1139                         return 0;
1140 #endif
1141                 goto fail;
1142         }
1143
1144         leaf = path->nodes[0];
1145         nritems = btrfs_header_nritems(leaf);
1146         while (1) {
1147                 if (path->slots[0] >= nritems) {
1148                         ret = btrfs_next_leaf(root, path);
1149                         if (ret < 0)
1150                                 err = ret;
1151                         if (ret)
1152                                 goto fail;
1153
1154                         leaf = path->nodes[0];
1155                         nritems = btrfs_header_nritems(leaf);
1156                         recow = 1;
1157                 }
1158
1159                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1160                 if (key.objectid != bytenr ||
1161                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1162                         goto fail;
1163
1164                 ref = btrfs_item_ptr(leaf, path->slots[0],
1165                                      struct btrfs_extent_data_ref);
1166
1167                 if (match_extent_data_ref(leaf, ref, root_objectid,
1168                                           owner, offset)) {
1169                         if (recow) {
1170                                 btrfs_release_path(path);
1171                                 goto again;
1172                         }
1173                         err = 0;
1174                         break;
1175                 }
1176                 path->slots[0]++;
1177         }
1178 fail:
1179         return err;
1180 }
1181
1182 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1183                                            struct btrfs_root *root,
1184                                            struct btrfs_path *path,
1185                                            u64 bytenr, u64 parent,
1186                                            u64 root_objectid, u64 owner,
1187                                            u64 offset, int refs_to_add)
1188 {
1189         struct btrfs_key key;
1190         struct extent_buffer *leaf;
1191         u32 size;
1192         u32 num_refs;
1193         int ret;
1194
1195         key.objectid = bytenr;
1196         if (parent) {
1197                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1198                 key.offset = parent;
1199                 size = sizeof(struct btrfs_shared_data_ref);
1200         } else {
1201                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1202                 key.offset = hash_extent_data_ref(root_objectid,
1203                                                   owner, offset);
1204                 size = sizeof(struct btrfs_extent_data_ref);
1205         }
1206
1207         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1208         if (ret && ret != -EEXIST)
1209                 goto fail;
1210
1211         leaf = path->nodes[0];
1212         if (parent) {
1213                 struct btrfs_shared_data_ref *ref;
1214                 ref = btrfs_item_ptr(leaf, path->slots[0],
1215                                      struct btrfs_shared_data_ref);
1216                 if (ret == 0) {
1217                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1218                 } else {
1219                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1220                         num_refs += refs_to_add;
1221                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1222                 }
1223         } else {
1224                 struct btrfs_extent_data_ref *ref;
1225                 while (ret == -EEXIST) {
1226                         ref = btrfs_item_ptr(leaf, path->slots[0],
1227                                              struct btrfs_extent_data_ref);
1228                         if (match_extent_data_ref(leaf, ref, root_objectid,
1229                                                   owner, offset))
1230                                 break;
1231                         btrfs_release_path(path);
1232                         key.offset++;
1233                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1234                                                       size);
1235                         if (ret && ret != -EEXIST)
1236                                 goto fail;
1237
1238                         leaf = path->nodes[0];
1239                 }
1240                 ref = btrfs_item_ptr(leaf, path->slots[0],
1241                                      struct btrfs_extent_data_ref);
1242                 if (ret == 0) {
1243                         btrfs_set_extent_data_ref_root(leaf, ref,
1244                                                        root_objectid);
1245                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1246                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1247                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1248                 } else {
1249                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1250                         num_refs += refs_to_add;
1251                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1252                 }
1253         }
1254         btrfs_mark_buffer_dirty(leaf);
1255         ret = 0;
1256 fail:
1257         btrfs_release_path(path);
1258         return ret;
1259 }
1260
1261 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1262                                            struct btrfs_root *root,
1263                                            struct btrfs_path *path,
1264                                            int refs_to_drop, int *last_ref)
1265 {
1266         struct btrfs_key key;
1267         struct btrfs_extent_data_ref *ref1 = NULL;
1268         struct btrfs_shared_data_ref *ref2 = NULL;
1269         struct extent_buffer *leaf;
1270         u32 num_refs = 0;
1271         int ret = 0;
1272
1273         leaf = path->nodes[0];
1274         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1275
1276         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1277                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1278                                       struct btrfs_extent_data_ref);
1279                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1280         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1281                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1282                                       struct btrfs_shared_data_ref);
1283                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1284 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1285         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1286                 struct btrfs_extent_ref_v0 *ref0;
1287                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_ref_v0);
1289                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1290 #endif
1291         } else {
1292                 BUG();
1293         }
1294
1295         BUG_ON(num_refs < refs_to_drop);
1296         num_refs -= refs_to_drop;
1297
1298         if (num_refs == 0) {
1299                 ret = btrfs_del_item(trans, root, path);
1300                 *last_ref = 1;
1301         } else {
1302                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1303                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1304                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1305                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307                 else {
1308                         struct btrfs_extent_ref_v0 *ref0;
1309                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310                                         struct btrfs_extent_ref_v0);
1311                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1312                 }
1313 #endif
1314                 btrfs_mark_buffer_dirty(leaf);
1315         }
1316         return ret;
1317 }
1318
1319 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1320                                           struct btrfs_path *path,
1321                                           struct btrfs_extent_inline_ref *iref)
1322 {
1323         struct btrfs_key key;
1324         struct extent_buffer *leaf;
1325         struct btrfs_extent_data_ref *ref1;
1326         struct btrfs_shared_data_ref *ref2;
1327         u32 num_refs = 0;
1328
1329         leaf = path->nodes[0];
1330         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1331         if (iref) {
1332                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1333                     BTRFS_EXTENT_DATA_REF_KEY) {
1334                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1335                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1336                 } else {
1337                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1338                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1339                 }
1340         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 WARN_ON(1);
1357         }
1358         return num_refs;
1359 }
1360
1361 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1362                                           struct btrfs_root *root,
1363                                           struct btrfs_path *path,
1364                                           u64 bytenr, u64 parent,
1365                                           u64 root_objectid)
1366 {
1367         struct btrfs_key key;
1368         int ret;
1369
1370         key.objectid = bytenr;
1371         if (parent) {
1372                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1373                 key.offset = parent;
1374         } else {
1375                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1376                 key.offset = root_objectid;
1377         }
1378
1379         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1380         if (ret > 0)
1381                 ret = -ENOENT;
1382 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1383         if (ret == -ENOENT && parent) {
1384                 btrfs_release_path(path);
1385                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1386                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1387                 if (ret > 0)
1388                         ret = -ENOENT;
1389         }
1390 #endif
1391         return ret;
1392 }
1393
1394 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1395                                           struct btrfs_root *root,
1396                                           struct btrfs_path *path,
1397                                           u64 bytenr, u64 parent,
1398                                           u64 root_objectid)
1399 {
1400         struct btrfs_key key;
1401         int ret;
1402
1403         key.objectid = bytenr;
1404         if (parent) {
1405                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1406                 key.offset = parent;
1407         } else {
1408                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1409                 key.offset = root_objectid;
1410         }
1411
1412         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1413         btrfs_release_path(path);
1414         return ret;
1415 }
1416
1417 static inline int extent_ref_type(u64 parent, u64 owner)
1418 {
1419         int type;
1420         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1421                 if (parent > 0)
1422                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1423                 else
1424                         type = BTRFS_TREE_BLOCK_REF_KEY;
1425         } else {
1426                 if (parent > 0)
1427                         type = BTRFS_SHARED_DATA_REF_KEY;
1428                 else
1429                         type = BTRFS_EXTENT_DATA_REF_KEY;
1430         }
1431         return type;
1432 }
1433
1434 static int find_next_key(struct btrfs_path *path, int level,
1435                          struct btrfs_key *key)
1436
1437 {
1438         for (; level < BTRFS_MAX_LEVEL; level++) {
1439                 if (!path->nodes[level])
1440                         break;
1441                 if (path->slots[level] + 1 >=
1442                     btrfs_header_nritems(path->nodes[level]))
1443                         continue;
1444                 if (level == 0)
1445                         btrfs_item_key_to_cpu(path->nodes[level], key,
1446                                               path->slots[level] + 1);
1447                 else
1448                         btrfs_node_key_to_cpu(path->nodes[level], key,
1449                                               path->slots[level] + 1);
1450                 return 0;
1451         }
1452         return 1;
1453 }
1454
1455 /*
1456  * look for inline back ref. if back ref is found, *ref_ret is set
1457  * to the address of inline back ref, and 0 is returned.
1458  *
1459  * if back ref isn't found, *ref_ret is set to the address where it
1460  * should be inserted, and -ENOENT is returned.
1461  *
1462  * if insert is true and there are too many inline back refs, the path
1463  * points to the extent item, and -EAGAIN is returned.
1464  *
1465  * NOTE: inline back refs are ordered in the same way that back ref
1466  *       items in the tree are ordered.
1467  */
1468 static noinline_for_stack
1469 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1470                                  struct btrfs_root *root,
1471                                  struct btrfs_path *path,
1472                                  struct btrfs_extent_inline_ref **ref_ret,
1473                                  u64 bytenr, u64 num_bytes,
1474                                  u64 parent, u64 root_objectid,
1475                                  u64 owner, u64 offset, int insert)
1476 {
1477         struct btrfs_key key;
1478         struct extent_buffer *leaf;
1479         struct btrfs_extent_item *ei;
1480         struct btrfs_extent_inline_ref *iref;
1481         u64 flags;
1482         u64 item_size;
1483         unsigned long ptr;
1484         unsigned long end;
1485         int extra_size;
1486         int type;
1487         int want;
1488         int ret;
1489         int err = 0;
1490         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1491                                                  SKINNY_METADATA);
1492
1493         key.objectid = bytenr;
1494         key.type = BTRFS_EXTENT_ITEM_KEY;
1495         key.offset = num_bytes;
1496
1497         want = extent_ref_type(parent, owner);
1498         if (insert) {
1499                 extra_size = btrfs_extent_inline_ref_size(want);
1500                 path->keep_locks = 1;
1501         } else
1502                 extra_size = -1;
1503
1504         /*
1505          * Owner is our parent level, so we can just add one to get the level
1506          * for the block we are interested in.
1507          */
1508         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1509                 key.type = BTRFS_METADATA_ITEM_KEY;
1510                 key.offset = owner;
1511         }
1512
1513 again:
1514         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1515         if (ret < 0) {
1516                 err = ret;
1517                 goto out;
1518         }
1519
1520         /*
1521          * We may be a newly converted file system which still has the old fat
1522          * extent entries for metadata, so try and see if we have one of those.
1523          */
1524         if (ret > 0 && skinny_metadata) {
1525                 skinny_metadata = false;
1526                 if (path->slots[0]) {
1527                         path->slots[0]--;
1528                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1529                                               path->slots[0]);
1530                         if (key.objectid == bytenr &&
1531                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1532                             key.offset == num_bytes)
1533                                 ret = 0;
1534                 }
1535                 if (ret) {
1536                         key.objectid = bytenr;
1537                         key.type = BTRFS_EXTENT_ITEM_KEY;
1538                         key.offset = num_bytes;
1539                         btrfs_release_path(path);
1540                         goto again;
1541                 }
1542         }
1543
1544         if (ret && !insert) {
1545                 err = -ENOENT;
1546                 goto out;
1547         } else if (WARN_ON(ret)) {
1548                 err = -EIO;
1549                 goto out;
1550         }
1551
1552         leaf = path->nodes[0];
1553         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1554 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1555         if (item_size < sizeof(*ei)) {
1556                 if (!insert) {
1557                         err = -ENOENT;
1558                         goto out;
1559                 }
1560                 ret = convert_extent_item_v0(trans, root, path, owner,
1561                                              extra_size);
1562                 if (ret < 0) {
1563                         err = ret;
1564                         goto out;
1565                 }
1566                 leaf = path->nodes[0];
1567                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1568         }
1569 #endif
1570         BUG_ON(item_size < sizeof(*ei));
1571
1572         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1573         flags = btrfs_extent_flags(leaf, ei);
1574
1575         ptr = (unsigned long)(ei + 1);
1576         end = (unsigned long)ei + item_size;
1577
1578         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1579                 ptr += sizeof(struct btrfs_tree_block_info);
1580                 BUG_ON(ptr > end);
1581         }
1582
1583         err = -ENOENT;
1584         while (1) {
1585                 if (ptr >= end) {
1586                         WARN_ON(ptr > end);
1587                         break;
1588                 }
1589                 iref = (struct btrfs_extent_inline_ref *)ptr;
1590                 type = btrfs_extent_inline_ref_type(leaf, iref);
1591                 if (want < type)
1592                         break;
1593                 if (want > type) {
1594                         ptr += btrfs_extent_inline_ref_size(type);
1595                         continue;
1596                 }
1597
1598                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1599                         struct btrfs_extent_data_ref *dref;
1600                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1601                         if (match_extent_data_ref(leaf, dref, root_objectid,
1602                                                   owner, offset)) {
1603                                 err = 0;
1604                                 break;
1605                         }
1606                         if (hash_extent_data_ref_item(leaf, dref) <
1607                             hash_extent_data_ref(root_objectid, owner, offset))
1608                                 break;
1609                 } else {
1610                         u64 ref_offset;
1611                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1612                         if (parent > 0) {
1613                                 if (parent == ref_offset) {
1614                                         err = 0;
1615                                         break;
1616                                 }
1617                                 if (ref_offset < parent)
1618                                         break;
1619                         } else {
1620                                 if (root_objectid == ref_offset) {
1621                                         err = 0;
1622                                         break;
1623                                 }
1624                                 if (ref_offset < root_objectid)
1625                                         break;
1626                         }
1627                 }
1628                 ptr += btrfs_extent_inline_ref_size(type);
1629         }
1630         if (err == -ENOENT && insert) {
1631                 if (item_size + extra_size >=
1632                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1633                         err = -EAGAIN;
1634                         goto out;
1635                 }
1636                 /*
1637                  * To add new inline back ref, we have to make sure
1638                  * there is no corresponding back ref item.
1639                  * For simplicity, we just do not add new inline back
1640                  * ref if there is any kind of item for this block
1641                  */
1642                 if (find_next_key(path, 0, &key) == 0 &&
1643                     key.objectid == bytenr &&
1644                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1645                         err = -EAGAIN;
1646                         goto out;
1647                 }
1648         }
1649         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1650 out:
1651         if (insert) {
1652                 path->keep_locks = 0;
1653                 btrfs_unlock_up_safe(path, 1);
1654         }
1655         return err;
1656 }
1657
1658 /*
1659  * helper to add new inline back ref
1660  */
1661 static noinline_for_stack
1662 void setup_inline_extent_backref(struct btrfs_root *root,
1663                                  struct btrfs_path *path,
1664                                  struct btrfs_extent_inline_ref *iref,
1665                                  u64 parent, u64 root_objectid,
1666                                  u64 owner, u64 offset, int refs_to_add,
1667                                  struct btrfs_delayed_extent_op *extent_op)
1668 {
1669         struct extent_buffer *leaf;
1670         struct btrfs_extent_item *ei;
1671         unsigned long ptr;
1672         unsigned long end;
1673         unsigned long item_offset;
1674         u64 refs;
1675         int size;
1676         int type;
1677
1678         leaf = path->nodes[0];
1679         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1680         item_offset = (unsigned long)iref - (unsigned long)ei;
1681
1682         type = extent_ref_type(parent, owner);
1683         size = btrfs_extent_inline_ref_size(type);
1684
1685         btrfs_extend_item(root, path, size);
1686
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         refs = btrfs_extent_refs(leaf, ei);
1689         refs += refs_to_add;
1690         btrfs_set_extent_refs(leaf, ei, refs);
1691         if (extent_op)
1692                 __run_delayed_extent_op(extent_op, leaf, ei);
1693
1694         ptr = (unsigned long)ei + item_offset;
1695         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1696         if (ptr < end - size)
1697                 memmove_extent_buffer(leaf, ptr + size, ptr,
1698                                       end - size - ptr);
1699
1700         iref = (struct btrfs_extent_inline_ref *)ptr;
1701         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1702         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1703                 struct btrfs_extent_data_ref *dref;
1704                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1705                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1706                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1707                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1708                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1709         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710                 struct btrfs_shared_data_ref *sref;
1711                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1712                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1713                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1714         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1715                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1716         } else {
1717                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1718         }
1719         btrfs_mark_buffer_dirty(leaf);
1720 }
1721
1722 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1723                                  struct btrfs_root *root,
1724                                  struct btrfs_path *path,
1725                                  struct btrfs_extent_inline_ref **ref_ret,
1726                                  u64 bytenr, u64 num_bytes, u64 parent,
1727                                  u64 root_objectid, u64 owner, u64 offset)
1728 {
1729         int ret;
1730
1731         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1732                                            bytenr, num_bytes, parent,
1733                                            root_objectid, owner, offset, 0);
1734         if (ret != -ENOENT)
1735                 return ret;
1736
1737         btrfs_release_path(path);
1738         *ref_ret = NULL;
1739
1740         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1741                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1742                                             root_objectid);
1743         } else {
1744                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1745                                              root_objectid, owner, offset);
1746         }
1747         return ret;
1748 }
1749
1750 /*
1751  * helper to update/remove inline back ref
1752  */
1753 static noinline_for_stack
1754 void update_inline_extent_backref(struct btrfs_root *root,
1755                                   struct btrfs_path *path,
1756                                   struct btrfs_extent_inline_ref *iref,
1757                                   int refs_to_mod,
1758                                   struct btrfs_delayed_extent_op *extent_op,
1759                                   int *last_ref)
1760 {
1761         struct extent_buffer *leaf;
1762         struct btrfs_extent_item *ei;
1763         struct btrfs_extent_data_ref *dref = NULL;
1764         struct btrfs_shared_data_ref *sref = NULL;
1765         unsigned long ptr;
1766         unsigned long end;
1767         u32 item_size;
1768         int size;
1769         int type;
1770         u64 refs;
1771
1772         leaf = path->nodes[0];
1773         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1774         refs = btrfs_extent_refs(leaf, ei);
1775         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1776         refs += refs_to_mod;
1777         btrfs_set_extent_refs(leaf, ei, refs);
1778         if (extent_op)
1779                 __run_delayed_extent_op(extent_op, leaf, ei);
1780
1781         type = btrfs_extent_inline_ref_type(leaf, iref);
1782
1783         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1784                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1785                 refs = btrfs_extent_data_ref_count(leaf, dref);
1786         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1787                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1788                 refs = btrfs_shared_data_ref_count(leaf, sref);
1789         } else {
1790                 refs = 1;
1791                 BUG_ON(refs_to_mod != -1);
1792         }
1793
1794         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1795         refs += refs_to_mod;
1796
1797         if (refs > 0) {
1798                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1799                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1800                 else
1801                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1802         } else {
1803                 *last_ref = 1;
1804                 size =  btrfs_extent_inline_ref_size(type);
1805                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1806                 ptr = (unsigned long)iref;
1807                 end = (unsigned long)ei + item_size;
1808                 if (ptr + size < end)
1809                         memmove_extent_buffer(leaf, ptr, ptr + size,
1810                                               end - ptr - size);
1811                 item_size -= size;
1812                 btrfs_truncate_item(root, path, item_size, 1);
1813         }
1814         btrfs_mark_buffer_dirty(leaf);
1815 }
1816
1817 static noinline_for_stack
1818 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1819                                  struct btrfs_root *root,
1820                                  struct btrfs_path *path,
1821                                  u64 bytenr, u64 num_bytes, u64 parent,
1822                                  u64 root_objectid, u64 owner,
1823                                  u64 offset, int refs_to_add,
1824                                  struct btrfs_delayed_extent_op *extent_op)
1825 {
1826         struct btrfs_extent_inline_ref *iref;
1827         int ret;
1828
1829         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1830                                            bytenr, num_bytes, parent,
1831                                            root_objectid, owner, offset, 1);
1832         if (ret == 0) {
1833                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1834                 update_inline_extent_backref(root, path, iref,
1835                                              refs_to_add, extent_op, NULL);
1836         } else if (ret == -ENOENT) {
1837                 setup_inline_extent_backref(root, path, iref, parent,
1838                                             root_objectid, owner, offset,
1839                                             refs_to_add, extent_op);
1840                 ret = 0;
1841         }
1842         return ret;
1843 }
1844
1845 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1846                                  struct btrfs_root *root,
1847                                  struct btrfs_path *path,
1848                                  u64 bytenr, u64 parent, u64 root_objectid,
1849                                  u64 owner, u64 offset, int refs_to_add)
1850 {
1851         int ret;
1852         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1853                 BUG_ON(refs_to_add != 1);
1854                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1855                                             parent, root_objectid);
1856         } else {
1857                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1858                                              parent, root_objectid,
1859                                              owner, offset, refs_to_add);
1860         }
1861         return ret;
1862 }
1863
1864 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1865                                  struct btrfs_root *root,
1866                                  struct btrfs_path *path,
1867                                  struct btrfs_extent_inline_ref *iref,
1868                                  int refs_to_drop, int is_data, int *last_ref)
1869 {
1870         int ret = 0;
1871
1872         BUG_ON(!is_data && refs_to_drop != 1);
1873         if (iref) {
1874                 update_inline_extent_backref(root, path, iref,
1875                                              -refs_to_drop, NULL, last_ref);
1876         } else if (is_data) {
1877                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1878                                              last_ref);
1879         } else {
1880                 *last_ref = 1;
1881                 ret = btrfs_del_item(trans, root, path);
1882         }
1883         return ret;
1884 }
1885
1886 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1887                                u64 *discarded_bytes)
1888 {
1889         int ret = 0;
1890         u64 aligned_start = ALIGN(start, 1 << 9);
1891
1892         if (WARN_ON(start != aligned_start)) {
1893                 len -= aligned_start - start;
1894                 len = round_down(len, 1 << 9);
1895                 start = aligned_start;
1896         }
1897
1898         *discarded_bytes = 0;
1899         if (len) {
1900                 ret = blkdev_issue_discard(bdev, start >> 9, len >> 9,
1901                                            GFP_NOFS, 0);
1902                 if (!ret)
1903                         *discarded_bytes = len;
1904         }
1905         return ret;
1906 }
1907
1908 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1909                          u64 num_bytes, u64 *actual_bytes)
1910 {
1911         int ret;
1912         u64 discarded_bytes = 0;
1913         struct btrfs_bio *bbio = NULL;
1914
1915
1916         /* Tell the block device(s) that the sectors can be discarded */
1917         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1918                               bytenr, &num_bytes, &bbio, 0);
1919         /* Error condition is -ENOMEM */
1920         if (!ret) {
1921                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1922                 int i;
1923
1924
1925                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1926                         u64 bytes;
1927                         if (!stripe->dev->can_discard)
1928                                 continue;
1929
1930                         ret = btrfs_issue_discard(stripe->dev->bdev,
1931                                                   stripe->physical,
1932                                                   stripe->length,
1933                                                   &bytes);
1934                         if (!ret)
1935                                 discarded_bytes += bytes;
1936                         else if (ret != -EOPNOTSUPP)
1937                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1938
1939                         /*
1940                          * Just in case we get back EOPNOTSUPP for some reason,
1941                          * just ignore the return value so we don't screw up
1942                          * people calling discard_extent.
1943                          */
1944                         ret = 0;
1945                 }
1946                 btrfs_put_bbio(bbio);
1947         }
1948
1949         if (actual_bytes)
1950                 *actual_bytes = discarded_bytes;
1951
1952
1953         if (ret == -EOPNOTSUPP)
1954                 ret = 0;
1955         return ret;
1956 }
1957
1958 /* Can return -ENOMEM */
1959 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1960                          struct btrfs_root *root,
1961                          u64 bytenr, u64 num_bytes, u64 parent,
1962                          u64 root_objectid, u64 owner, u64 offset,
1963                          int no_quota)
1964 {
1965         int ret;
1966         struct btrfs_fs_info *fs_info = root->fs_info;
1967
1968         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1969                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1970
1971         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1972                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1973                                         num_bytes,
1974                                         parent, root_objectid, (int)owner,
1975                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1976         } else {
1977                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1978                                         num_bytes,
1979                                         parent, root_objectid, owner, offset,
1980                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1981         }
1982         return ret;
1983 }
1984
1985 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1986                                   struct btrfs_root *root,
1987                                   struct btrfs_delayed_ref_node *node,
1988                                   u64 parent, u64 root_objectid,
1989                                   u64 owner, u64 offset, int refs_to_add,
1990                                   struct btrfs_delayed_extent_op *extent_op)
1991 {
1992         struct btrfs_fs_info *fs_info = root->fs_info;
1993         struct btrfs_path *path;
1994         struct extent_buffer *leaf;
1995         struct btrfs_extent_item *item;
1996         struct btrfs_key key;
1997         u64 bytenr = node->bytenr;
1998         u64 num_bytes = node->num_bytes;
1999         u64 refs;
2000         int ret;
2001         int no_quota = node->no_quota;
2002
2003         path = btrfs_alloc_path();
2004         if (!path)
2005                 return -ENOMEM;
2006
2007         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2008                 no_quota = 1;
2009
2010         path->reada = 1;
2011         path->leave_spinning = 1;
2012         /* this will setup the path even if it fails to insert the back ref */
2013         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2014                                            bytenr, num_bytes, parent,
2015                                            root_objectid, owner, offset,
2016                                            refs_to_add, extent_op);
2017         if ((ret < 0 && ret != -EAGAIN) || !ret)
2018                 goto out;
2019
2020         /*
2021          * Ok we had -EAGAIN which means we didn't have space to insert and
2022          * inline extent ref, so just update the reference count and add a
2023          * normal backref.
2024          */
2025         leaf = path->nodes[0];
2026         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2027         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2028         refs = btrfs_extent_refs(leaf, item);
2029         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2030         if (extent_op)
2031                 __run_delayed_extent_op(extent_op, leaf, item);
2032
2033         btrfs_mark_buffer_dirty(leaf);
2034         btrfs_release_path(path);
2035
2036         path->reada = 1;
2037         path->leave_spinning = 1;
2038         /* now insert the actual backref */
2039         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2040                                     path, bytenr, parent, root_objectid,
2041                                     owner, offset, refs_to_add);
2042         if (ret)
2043                 btrfs_abort_transaction(trans, root, ret);
2044 out:
2045         btrfs_free_path(path);
2046         return ret;
2047 }
2048
2049 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2050                                 struct btrfs_root *root,
2051                                 struct btrfs_delayed_ref_node *node,
2052                                 struct btrfs_delayed_extent_op *extent_op,
2053                                 int insert_reserved)
2054 {
2055         int ret = 0;
2056         struct btrfs_delayed_data_ref *ref;
2057         struct btrfs_key ins;
2058         u64 parent = 0;
2059         u64 ref_root = 0;
2060         u64 flags = 0;
2061
2062         ins.objectid = node->bytenr;
2063         ins.offset = node->num_bytes;
2064         ins.type = BTRFS_EXTENT_ITEM_KEY;
2065
2066         ref = btrfs_delayed_node_to_data_ref(node);
2067         trace_run_delayed_data_ref(node, ref, node->action);
2068
2069         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2070                 parent = ref->parent;
2071         ref_root = ref->root;
2072
2073         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2074                 if (extent_op)
2075                         flags |= extent_op->flags_to_set;
2076                 ret = alloc_reserved_file_extent(trans, root,
2077                                                  parent, ref_root, flags,
2078                                                  ref->objectid, ref->offset,
2079                                                  &ins, node->ref_mod);
2080         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2081                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2082                                              ref_root, ref->objectid,
2083                                              ref->offset, node->ref_mod,
2084                                              extent_op);
2085         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2086                 ret = __btrfs_free_extent(trans, root, node, parent,
2087                                           ref_root, ref->objectid,
2088                                           ref->offset, node->ref_mod,
2089                                           extent_op);
2090         } else {
2091                 BUG();
2092         }
2093         return ret;
2094 }
2095
2096 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2097                                     struct extent_buffer *leaf,
2098                                     struct btrfs_extent_item *ei)
2099 {
2100         u64 flags = btrfs_extent_flags(leaf, ei);
2101         if (extent_op->update_flags) {
2102                 flags |= extent_op->flags_to_set;
2103                 btrfs_set_extent_flags(leaf, ei, flags);
2104         }
2105
2106         if (extent_op->update_key) {
2107                 struct btrfs_tree_block_info *bi;
2108                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2109                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2110                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2111         }
2112 }
2113
2114 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2115                                  struct btrfs_root *root,
2116                                  struct btrfs_delayed_ref_node *node,
2117                                  struct btrfs_delayed_extent_op *extent_op)
2118 {
2119         struct btrfs_key key;
2120         struct btrfs_path *path;
2121         struct btrfs_extent_item *ei;
2122         struct extent_buffer *leaf;
2123         u32 item_size;
2124         int ret;
2125         int err = 0;
2126         int metadata = !extent_op->is_data;
2127
2128         if (trans->aborted)
2129                 return 0;
2130
2131         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2132                 metadata = 0;
2133
2134         path = btrfs_alloc_path();
2135         if (!path)
2136                 return -ENOMEM;
2137
2138         key.objectid = node->bytenr;
2139
2140         if (metadata) {
2141                 key.type = BTRFS_METADATA_ITEM_KEY;
2142                 key.offset = extent_op->level;
2143         } else {
2144                 key.type = BTRFS_EXTENT_ITEM_KEY;
2145                 key.offset = node->num_bytes;
2146         }
2147
2148 again:
2149         path->reada = 1;
2150         path->leave_spinning = 1;
2151         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2152                                 path, 0, 1);
2153         if (ret < 0) {
2154                 err = ret;
2155                 goto out;
2156         }
2157         if (ret > 0) {
2158                 if (metadata) {
2159                         if (path->slots[0] > 0) {
2160                                 path->slots[0]--;
2161                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2162                                                       path->slots[0]);
2163                                 if (key.objectid == node->bytenr &&
2164                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2165                                     key.offset == node->num_bytes)
2166                                         ret = 0;
2167                         }
2168                         if (ret > 0) {
2169                                 btrfs_release_path(path);
2170                                 metadata = 0;
2171
2172                                 key.objectid = node->bytenr;
2173                                 key.offset = node->num_bytes;
2174                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2175                                 goto again;
2176                         }
2177                 } else {
2178                         err = -EIO;
2179                         goto out;
2180                 }
2181         }
2182
2183         leaf = path->nodes[0];
2184         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2185 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2186         if (item_size < sizeof(*ei)) {
2187                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2188                                              path, (u64)-1, 0);
2189                 if (ret < 0) {
2190                         err = ret;
2191                         goto out;
2192                 }
2193                 leaf = path->nodes[0];
2194                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2195         }
2196 #endif
2197         BUG_ON(item_size < sizeof(*ei));
2198         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2199         __run_delayed_extent_op(extent_op, leaf, ei);
2200
2201         btrfs_mark_buffer_dirty(leaf);
2202 out:
2203         btrfs_free_path(path);
2204         return err;
2205 }
2206
2207 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2208                                 struct btrfs_root *root,
2209                                 struct btrfs_delayed_ref_node *node,
2210                                 struct btrfs_delayed_extent_op *extent_op,
2211                                 int insert_reserved)
2212 {
2213         int ret = 0;
2214         struct btrfs_delayed_tree_ref *ref;
2215         struct btrfs_key ins;
2216         u64 parent = 0;
2217         u64 ref_root = 0;
2218         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2219                                                  SKINNY_METADATA);
2220
2221         ref = btrfs_delayed_node_to_tree_ref(node);
2222         trace_run_delayed_tree_ref(node, ref, node->action);
2223
2224         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2225                 parent = ref->parent;
2226         ref_root = ref->root;
2227
2228         ins.objectid = node->bytenr;
2229         if (skinny_metadata) {
2230                 ins.offset = ref->level;
2231                 ins.type = BTRFS_METADATA_ITEM_KEY;
2232         } else {
2233                 ins.offset = node->num_bytes;
2234                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2235         }
2236
2237         BUG_ON(node->ref_mod != 1);
2238         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2239                 BUG_ON(!extent_op || !extent_op->update_flags);
2240                 ret = alloc_reserved_tree_block(trans, root,
2241                                                 parent, ref_root,
2242                                                 extent_op->flags_to_set,
2243                                                 &extent_op->key,
2244                                                 ref->level, &ins,
2245                                                 node->no_quota);
2246         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2247                 ret = __btrfs_inc_extent_ref(trans, root, node,
2248                                              parent, ref_root,
2249                                              ref->level, 0, 1,
2250                                              extent_op);
2251         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2252                 ret = __btrfs_free_extent(trans, root, node,
2253                                           parent, ref_root,
2254                                           ref->level, 0, 1, extent_op);
2255         } else {
2256                 BUG();
2257         }
2258         return ret;
2259 }
2260
2261 /* helper function to actually process a single delayed ref entry */
2262 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2263                                struct btrfs_root *root,
2264                                struct btrfs_delayed_ref_node *node,
2265                                struct btrfs_delayed_extent_op *extent_op,
2266                                int insert_reserved)
2267 {
2268         int ret = 0;
2269
2270         if (trans->aborted) {
2271                 if (insert_reserved)
2272                         btrfs_pin_extent(root, node->bytenr,
2273                                          node->num_bytes, 1);
2274                 return 0;
2275         }
2276
2277         if (btrfs_delayed_ref_is_head(node)) {
2278                 struct btrfs_delayed_ref_head *head;
2279                 /*
2280                  * we've hit the end of the chain and we were supposed
2281                  * to insert this extent into the tree.  But, it got
2282                  * deleted before we ever needed to insert it, so all
2283                  * we have to do is clean up the accounting
2284                  */
2285                 BUG_ON(extent_op);
2286                 head = btrfs_delayed_node_to_head(node);
2287                 trace_run_delayed_ref_head(node, head, node->action);
2288
2289                 if (insert_reserved) {
2290                         btrfs_pin_extent(root, node->bytenr,
2291                                          node->num_bytes, 1);
2292                         if (head->is_data) {
2293                                 ret = btrfs_del_csums(trans, root,
2294                                                       node->bytenr,
2295                                                       node->num_bytes);
2296                         }
2297                 }
2298                 return ret;
2299         }
2300
2301         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2302             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2303                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2304                                            insert_reserved);
2305         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2306                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2307                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2308                                            insert_reserved);
2309         else
2310                 BUG();
2311         return ret;
2312 }
2313
2314 static inline struct btrfs_delayed_ref_node *
2315 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2316 {
2317         struct btrfs_delayed_ref_node *ref;
2318
2319         if (list_empty(&head->ref_list))
2320                 return NULL;
2321
2322         /*
2323          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2324          * This is to prevent a ref count from going down to zero, which deletes
2325          * the extent item from the extent tree, when there still are references
2326          * to add, which would fail because they would not find the extent item.
2327          */
2328         list_for_each_entry(ref, &head->ref_list, list) {
2329                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2330                         return ref;
2331         }
2332
2333         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2334                           list);
2335 }
2336
2337 /*
2338  * Returns 0 on success or if called with an already aborted transaction.
2339  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2340  */
2341 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2342                                              struct btrfs_root *root,
2343                                              unsigned long nr)
2344 {
2345         struct btrfs_delayed_ref_root *delayed_refs;
2346         struct btrfs_delayed_ref_node *ref;
2347         struct btrfs_delayed_ref_head *locked_ref = NULL;
2348         struct btrfs_delayed_extent_op *extent_op;
2349         struct btrfs_fs_info *fs_info = root->fs_info;
2350         ktime_t start = ktime_get();
2351         int ret;
2352         unsigned long count = 0;
2353         unsigned long actual_count = 0;
2354         int must_insert_reserved = 0;
2355
2356         delayed_refs = &trans->transaction->delayed_refs;
2357         while (1) {
2358                 if (!locked_ref) {
2359                         if (count >= nr)
2360                                 break;
2361
2362                         spin_lock(&delayed_refs->lock);
2363                         locked_ref = btrfs_select_ref_head(trans);
2364                         if (!locked_ref) {
2365                                 spin_unlock(&delayed_refs->lock);
2366                                 break;
2367                         }
2368
2369                         /* grab the lock that says we are going to process
2370                          * all the refs for this head */
2371                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2372                         spin_unlock(&delayed_refs->lock);
2373                         /*
2374                          * we may have dropped the spin lock to get the head
2375                          * mutex lock, and that might have given someone else
2376                          * time to free the head.  If that's true, it has been
2377                          * removed from our list and we can move on.
2378                          */
2379                         if (ret == -EAGAIN) {
2380                                 locked_ref = NULL;
2381                                 count++;
2382                                 continue;
2383                         }
2384                 }
2385
2386                 spin_lock(&locked_ref->lock);
2387
2388                 /*
2389                  * locked_ref is the head node, so we have to go one
2390                  * node back for any delayed ref updates
2391                  */
2392                 ref = select_delayed_ref(locked_ref);
2393
2394                 if (ref && ref->seq &&
2395                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2396                         spin_unlock(&locked_ref->lock);
2397                         btrfs_delayed_ref_unlock(locked_ref);
2398                         spin_lock(&delayed_refs->lock);
2399                         locked_ref->processing = 0;
2400                         delayed_refs->num_heads_ready++;
2401                         spin_unlock(&delayed_refs->lock);
2402                         locked_ref = NULL;
2403                         cond_resched();
2404                         count++;
2405                         continue;
2406                 }
2407
2408                 /*
2409                  * record the must insert reserved flag before we
2410                  * drop the spin lock.
2411                  */
2412                 must_insert_reserved = locked_ref->must_insert_reserved;
2413                 locked_ref->must_insert_reserved = 0;
2414
2415                 extent_op = locked_ref->extent_op;
2416                 locked_ref->extent_op = NULL;
2417
2418                 if (!ref) {
2419
2420
2421                         /* All delayed refs have been processed, Go ahead
2422                          * and send the head node to run_one_delayed_ref,
2423                          * so that any accounting fixes can happen
2424                          */
2425                         ref = &locked_ref->node;
2426
2427                         if (extent_op && must_insert_reserved) {
2428                                 btrfs_free_delayed_extent_op(extent_op);
2429                                 extent_op = NULL;
2430                         }
2431
2432                         if (extent_op) {
2433                                 spin_unlock(&locked_ref->lock);
2434                                 ret = run_delayed_extent_op(trans, root,
2435                                                             ref, extent_op);
2436                                 btrfs_free_delayed_extent_op(extent_op);
2437
2438                                 if (ret) {
2439                                         /*
2440                                          * Need to reset must_insert_reserved if
2441                                          * there was an error so the abort stuff
2442                                          * can cleanup the reserved space
2443                                          * properly.
2444                                          */
2445                                         if (must_insert_reserved)
2446                                                 locked_ref->must_insert_reserved = 1;
2447                                         locked_ref->processing = 0;
2448                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2449                                         btrfs_delayed_ref_unlock(locked_ref);
2450                                         return ret;
2451                                 }
2452                                 continue;
2453                         }
2454
2455                         /*
2456                          * Need to drop our head ref lock and re-aqcuire the
2457                          * delayed ref lock and then re-check to make sure
2458                          * nobody got added.
2459                          */
2460                         spin_unlock(&locked_ref->lock);
2461                         spin_lock(&delayed_refs->lock);
2462                         spin_lock(&locked_ref->lock);
2463                         if (!list_empty(&locked_ref->ref_list) ||
2464                             locked_ref->extent_op) {
2465                                 spin_unlock(&locked_ref->lock);
2466                                 spin_unlock(&delayed_refs->lock);
2467                                 continue;
2468                         }
2469                         ref->in_tree = 0;
2470                         delayed_refs->num_heads--;
2471                         rb_erase(&locked_ref->href_node,
2472                                  &delayed_refs->href_root);
2473                         spin_unlock(&delayed_refs->lock);
2474                 } else {
2475                         actual_count++;
2476                         ref->in_tree = 0;
2477                         list_del(&ref->list);
2478                 }
2479                 atomic_dec(&delayed_refs->num_entries);
2480
2481                 if (!btrfs_delayed_ref_is_head(ref)) {
2482                         /*
2483                          * when we play the delayed ref, also correct the
2484                          * ref_mod on head
2485                          */
2486                         switch (ref->action) {
2487                         case BTRFS_ADD_DELAYED_REF:
2488                         case BTRFS_ADD_DELAYED_EXTENT:
2489                                 locked_ref->node.ref_mod -= ref->ref_mod;
2490                                 break;
2491                         case BTRFS_DROP_DELAYED_REF:
2492                                 locked_ref->node.ref_mod += ref->ref_mod;
2493                                 break;
2494                         default:
2495                                 WARN_ON(1);
2496                         }
2497                 }
2498                 spin_unlock(&locked_ref->lock);
2499
2500                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2501                                           must_insert_reserved);
2502
2503                 btrfs_free_delayed_extent_op(extent_op);
2504                 if (ret) {
2505                         locked_ref->processing = 0;
2506                         btrfs_delayed_ref_unlock(locked_ref);
2507                         btrfs_put_delayed_ref(ref);
2508                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2509                         return ret;
2510                 }
2511
2512                 /*
2513                  * If this node is a head, that means all the refs in this head
2514                  * have been dealt with, and we will pick the next head to deal
2515                  * with, so we must unlock the head and drop it from the cluster
2516                  * list before we release it.
2517                  */
2518                 if (btrfs_delayed_ref_is_head(ref)) {
2519                         if (locked_ref->is_data &&
2520                             locked_ref->total_ref_mod < 0) {
2521                                 spin_lock(&delayed_refs->lock);
2522                                 delayed_refs->pending_csums -= ref->num_bytes;
2523                                 spin_unlock(&delayed_refs->lock);
2524                         }
2525                         btrfs_delayed_ref_unlock(locked_ref);
2526                         locked_ref = NULL;
2527                 }
2528                 btrfs_put_delayed_ref(ref);
2529                 count++;
2530                 cond_resched();
2531         }
2532
2533         /*
2534          * We don't want to include ref heads since we can have empty ref heads
2535          * and those will drastically skew our runtime down since we just do
2536          * accounting, no actual extent tree updates.
2537          */
2538         if (actual_count > 0) {
2539                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2540                 u64 avg;
2541
2542                 /*
2543                  * We weigh the current average higher than our current runtime
2544                  * to avoid large swings in the average.
2545                  */
2546                 spin_lock(&delayed_refs->lock);
2547                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2548                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2549                 spin_unlock(&delayed_refs->lock);
2550         }
2551         return 0;
2552 }
2553
2554 #ifdef SCRAMBLE_DELAYED_REFS
2555 /*
2556  * Normally delayed refs get processed in ascending bytenr order. This
2557  * correlates in most cases to the order added. To expose dependencies on this
2558  * order, we start to process the tree in the middle instead of the beginning
2559  */
2560 static u64 find_middle(struct rb_root *root)
2561 {
2562         struct rb_node *n = root->rb_node;
2563         struct btrfs_delayed_ref_node *entry;
2564         int alt = 1;
2565         u64 middle;
2566         u64 first = 0, last = 0;
2567
2568         n = rb_first(root);
2569         if (n) {
2570                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2571                 first = entry->bytenr;
2572         }
2573         n = rb_last(root);
2574         if (n) {
2575                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2576                 last = entry->bytenr;
2577         }
2578         n = root->rb_node;
2579
2580         while (n) {
2581                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2582                 WARN_ON(!entry->in_tree);
2583
2584                 middle = entry->bytenr;
2585
2586                 if (alt)
2587                         n = n->rb_left;
2588                 else
2589                         n = n->rb_right;
2590
2591                 alt = 1 - alt;
2592         }
2593         return middle;
2594 }
2595 #endif
2596
2597 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2598 {
2599         u64 num_bytes;
2600
2601         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2602                              sizeof(struct btrfs_extent_inline_ref));
2603         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2604                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2605
2606         /*
2607          * We don't ever fill up leaves all the way so multiply by 2 just to be
2608          * closer to what we're really going to want to ouse.
2609          */
2610         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2611 }
2612
2613 /*
2614  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2615  * would require to store the csums for that many bytes.
2616  */
2617 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2618 {
2619         u64 csum_size;
2620         u64 num_csums_per_leaf;
2621         u64 num_csums;
2622
2623         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2624         num_csums_per_leaf = div64_u64(csum_size,
2625                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2626         num_csums = div64_u64(csum_bytes, root->sectorsize);
2627         num_csums += num_csums_per_leaf - 1;
2628         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2629         return num_csums;
2630 }
2631
2632 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2633                                        struct btrfs_root *root)
2634 {
2635         struct btrfs_block_rsv *global_rsv;
2636         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2637         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2638         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2639         u64 num_bytes, num_dirty_bgs_bytes;
2640         int ret = 0;
2641
2642         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2643         num_heads = heads_to_leaves(root, num_heads);
2644         if (num_heads > 1)
2645                 num_bytes += (num_heads - 1) * root->nodesize;
2646         num_bytes <<= 1;
2647         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2648         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2649                                                              num_dirty_bgs);
2650         global_rsv = &root->fs_info->global_block_rsv;
2651
2652         /*
2653          * If we can't allocate any more chunks lets make sure we have _lots_ of
2654          * wiggle room since running delayed refs can create more delayed refs.
2655          */
2656         if (global_rsv->space_info->full) {
2657                 num_dirty_bgs_bytes <<= 1;
2658                 num_bytes <<= 1;
2659         }
2660
2661         spin_lock(&global_rsv->lock);
2662         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2663                 ret = 1;
2664         spin_unlock(&global_rsv->lock);
2665         return ret;
2666 }
2667
2668 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2669                                        struct btrfs_root *root)
2670 {
2671         struct btrfs_fs_info *fs_info = root->fs_info;
2672         u64 num_entries =
2673                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2674         u64 avg_runtime;
2675         u64 val;
2676
2677         smp_mb();
2678         avg_runtime = fs_info->avg_delayed_ref_runtime;
2679         val = num_entries * avg_runtime;
2680         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2681                 return 1;
2682         if (val >= NSEC_PER_SEC / 2)
2683                 return 2;
2684
2685         return btrfs_check_space_for_delayed_refs(trans, root);
2686 }
2687
2688 struct async_delayed_refs {
2689         struct btrfs_root *root;
2690         int count;
2691         int error;
2692         int sync;
2693         struct completion wait;
2694         struct btrfs_work work;
2695 };
2696
2697 static void delayed_ref_async_start(struct btrfs_work *work)
2698 {
2699         struct async_delayed_refs *async;
2700         struct btrfs_trans_handle *trans;
2701         int ret;
2702
2703         async = container_of(work, struct async_delayed_refs, work);
2704
2705         trans = btrfs_join_transaction(async->root);
2706         if (IS_ERR(trans)) {
2707                 async->error = PTR_ERR(trans);
2708                 goto done;
2709         }
2710
2711         /*
2712          * trans->sync means that when we call end_transaciton, we won't
2713          * wait on delayed refs
2714          */
2715         trans->sync = true;
2716         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2717         if (ret)
2718                 async->error = ret;
2719
2720         ret = btrfs_end_transaction(trans, async->root);
2721         if (ret && !async->error)
2722                 async->error = ret;
2723 done:
2724         if (async->sync)
2725                 complete(&async->wait);
2726         else
2727                 kfree(async);
2728 }
2729
2730 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2731                                  unsigned long count, int wait)
2732 {
2733         struct async_delayed_refs *async;
2734         int ret;
2735
2736         async = kmalloc(sizeof(*async), GFP_NOFS);
2737         if (!async)
2738                 return -ENOMEM;
2739
2740         async->root = root->fs_info->tree_root;
2741         async->count = count;
2742         async->error = 0;
2743         if (wait)
2744                 async->sync = 1;
2745         else
2746                 async->sync = 0;
2747         init_completion(&async->wait);
2748
2749         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2750                         delayed_ref_async_start, NULL, NULL);
2751
2752         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2753
2754         if (wait) {
2755                 wait_for_completion(&async->wait);
2756                 ret = async->error;
2757                 kfree(async);
2758                 return ret;
2759         }
2760         return 0;
2761 }
2762
2763 /*
2764  * this starts processing the delayed reference count updates and
2765  * extent insertions we have queued up so far.  count can be
2766  * 0, which means to process everything in the tree at the start
2767  * of the run (but not newly added entries), or it can be some target
2768  * number you'd like to process.
2769  *
2770  * Returns 0 on success or if called with an aborted transaction
2771  * Returns <0 on error and aborts the transaction
2772  */
2773 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2774                            struct btrfs_root *root, unsigned long count)
2775 {
2776         struct rb_node *node;
2777         struct btrfs_delayed_ref_root *delayed_refs;
2778         struct btrfs_delayed_ref_head *head;
2779         int ret;
2780         int run_all = count == (unsigned long)-1;
2781
2782         /* We'll clean this up in btrfs_cleanup_transaction */
2783         if (trans->aborted)
2784                 return 0;
2785
2786         if (root == root->fs_info->extent_root)
2787                 root = root->fs_info->tree_root;
2788
2789         delayed_refs = &trans->transaction->delayed_refs;
2790         if (count == 0)
2791                 count = atomic_read(&delayed_refs->num_entries) * 2;
2792
2793 again:
2794 #ifdef SCRAMBLE_DELAYED_REFS
2795         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2796 #endif
2797         ret = __btrfs_run_delayed_refs(trans, root, count);
2798         if (ret < 0) {
2799                 btrfs_abort_transaction(trans, root, ret);
2800                 return ret;
2801         }
2802
2803         if (run_all) {
2804                 if (!list_empty(&trans->new_bgs))
2805                         btrfs_create_pending_block_groups(trans, root);
2806
2807                 spin_lock(&delayed_refs->lock);
2808                 node = rb_first(&delayed_refs->href_root);
2809                 if (!node) {
2810                         spin_unlock(&delayed_refs->lock);
2811                         goto out;
2812                 }
2813                 count = (unsigned long)-1;
2814
2815                 while (node) {
2816                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2817                                         href_node);
2818                         if (btrfs_delayed_ref_is_head(&head->node)) {
2819                                 struct btrfs_delayed_ref_node *ref;
2820
2821                                 ref = &head->node;
2822                                 atomic_inc(&ref->refs);
2823
2824                                 spin_unlock(&delayed_refs->lock);
2825                                 /*
2826                                  * Mutex was contended, block until it's
2827                                  * released and try again
2828                                  */
2829                                 mutex_lock(&head->mutex);
2830                                 mutex_unlock(&head->mutex);
2831
2832                                 btrfs_put_delayed_ref(ref);
2833                                 cond_resched();
2834                                 goto again;
2835                         } else {
2836                                 WARN_ON(1);
2837                         }
2838                         node = rb_next(node);
2839                 }
2840                 spin_unlock(&delayed_refs->lock);
2841                 cond_resched();
2842                 goto again;
2843         }
2844 out:
2845         assert_qgroups_uptodate(trans);
2846         return 0;
2847 }
2848
2849 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2850                                 struct btrfs_root *root,
2851                                 u64 bytenr, u64 num_bytes, u64 flags,
2852                                 int level, int is_data)
2853 {
2854         struct btrfs_delayed_extent_op *extent_op;
2855         int ret;
2856
2857         extent_op = btrfs_alloc_delayed_extent_op();
2858         if (!extent_op)
2859                 return -ENOMEM;
2860
2861         extent_op->flags_to_set = flags;
2862         extent_op->update_flags = 1;
2863         extent_op->update_key = 0;
2864         extent_op->is_data = is_data ? 1 : 0;
2865         extent_op->level = level;
2866
2867         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2868                                           num_bytes, extent_op);
2869         if (ret)
2870                 btrfs_free_delayed_extent_op(extent_op);
2871         return ret;
2872 }
2873
2874 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2875                                       struct btrfs_root *root,
2876                                       struct btrfs_path *path,
2877                                       u64 objectid, u64 offset, u64 bytenr)
2878 {
2879         struct btrfs_delayed_ref_head *head;
2880         struct btrfs_delayed_ref_node *ref;
2881         struct btrfs_delayed_data_ref *data_ref;
2882         struct btrfs_delayed_ref_root *delayed_refs;
2883         int ret = 0;
2884
2885         delayed_refs = &trans->transaction->delayed_refs;
2886         spin_lock(&delayed_refs->lock);
2887         head = btrfs_find_delayed_ref_head(trans, bytenr);
2888         if (!head) {
2889                 spin_unlock(&delayed_refs->lock);
2890                 return 0;
2891         }
2892
2893         if (!mutex_trylock(&head->mutex)) {
2894                 atomic_inc(&head->node.refs);
2895                 spin_unlock(&delayed_refs->lock);
2896
2897                 btrfs_release_path(path);
2898
2899                 /*
2900                  * Mutex was contended, block until it's released and let
2901                  * caller try again
2902                  */
2903                 mutex_lock(&head->mutex);
2904                 mutex_unlock(&head->mutex);
2905                 btrfs_put_delayed_ref(&head->node);
2906                 return -EAGAIN;
2907         }
2908         spin_unlock(&delayed_refs->lock);
2909
2910         spin_lock(&head->lock);
2911         list_for_each_entry(ref, &head->ref_list, list) {
2912                 /* If it's a shared ref we know a cross reference exists */
2913                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2914                         ret = 1;
2915                         break;
2916                 }
2917
2918                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2919
2920                 /*
2921                  * If our ref doesn't match the one we're currently looking at
2922                  * then we have a cross reference.
2923                  */
2924                 if (data_ref->root != root->root_key.objectid ||
2925                     data_ref->objectid != objectid ||
2926                     data_ref->offset != offset) {
2927                         ret = 1;
2928                         break;
2929                 }
2930         }
2931         spin_unlock(&head->lock);
2932         mutex_unlock(&head->mutex);
2933         return ret;
2934 }
2935
2936 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2937                                         struct btrfs_root *root,
2938                                         struct btrfs_path *path,
2939                                         u64 objectid, u64 offset, u64 bytenr)
2940 {
2941         struct btrfs_root *extent_root = root->fs_info->extent_root;
2942         struct extent_buffer *leaf;
2943         struct btrfs_extent_data_ref *ref;
2944         struct btrfs_extent_inline_ref *iref;
2945         struct btrfs_extent_item *ei;
2946         struct btrfs_key key;
2947         u32 item_size;
2948         int ret;
2949
2950         key.objectid = bytenr;
2951         key.offset = (u64)-1;
2952         key.type = BTRFS_EXTENT_ITEM_KEY;
2953
2954         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2955         if (ret < 0)
2956                 goto out;
2957         BUG_ON(ret == 0); /* Corruption */
2958
2959         ret = -ENOENT;
2960         if (path->slots[0] == 0)
2961                 goto out;
2962
2963         path->slots[0]--;
2964         leaf = path->nodes[0];
2965         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2966
2967         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2968                 goto out;
2969
2970         ret = 1;
2971         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2972 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2973         if (item_size < sizeof(*ei)) {
2974                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2975                 goto out;
2976         }
2977 #endif
2978         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2979
2980         if (item_size != sizeof(*ei) +
2981             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2982                 goto out;
2983
2984         if (btrfs_extent_generation(leaf, ei) <=
2985             btrfs_root_last_snapshot(&root->root_item))
2986                 goto out;
2987
2988         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2989         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2990             BTRFS_EXTENT_DATA_REF_KEY)
2991                 goto out;
2992
2993         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2994         if (btrfs_extent_refs(leaf, ei) !=
2995             btrfs_extent_data_ref_count(leaf, ref) ||
2996             btrfs_extent_data_ref_root(leaf, ref) !=
2997             root->root_key.objectid ||
2998             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2999             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3000                 goto out;
3001
3002         ret = 0;
3003 out:
3004         return ret;
3005 }
3006
3007 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3008                           struct btrfs_root *root,
3009                           u64 objectid, u64 offset, u64 bytenr)
3010 {
3011         struct btrfs_path *path;
3012         int ret;
3013         int ret2;
3014
3015         path = btrfs_alloc_path();
3016         if (!path)
3017                 return -ENOENT;
3018
3019         do {
3020                 ret = check_committed_ref(trans, root, path, objectid,
3021                                           offset, bytenr);
3022                 if (ret && ret != -ENOENT)
3023                         goto out;
3024
3025                 ret2 = check_delayed_ref(trans, root, path, objectid,
3026                                          offset, bytenr);
3027         } while (ret2 == -EAGAIN);
3028
3029         if (ret2 && ret2 != -ENOENT) {
3030                 ret = ret2;
3031                 goto out;
3032         }
3033
3034         if (ret != -ENOENT || ret2 != -ENOENT)
3035                 ret = 0;
3036 out:
3037         btrfs_free_path(path);
3038         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3039                 WARN_ON(ret > 0);
3040         return ret;
3041 }
3042
3043 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3044                            struct btrfs_root *root,
3045                            struct extent_buffer *buf,
3046                            int full_backref, int inc)
3047 {
3048         u64 bytenr;
3049         u64 num_bytes;
3050         u64 parent;
3051         u64 ref_root;
3052         u32 nritems;
3053         struct btrfs_key key;
3054         struct btrfs_file_extent_item *fi;
3055         int i;
3056         int level;
3057         int ret = 0;
3058         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3059                             u64, u64, u64, u64, u64, u64, int);
3060
3061
3062         if (btrfs_test_is_dummy_root(root))
3063                 return 0;
3064
3065         ref_root = btrfs_header_owner(buf);
3066         nritems = btrfs_header_nritems(buf);
3067         level = btrfs_header_level(buf);
3068
3069         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3070                 return 0;
3071
3072         if (inc)
3073                 process_func = btrfs_inc_extent_ref;
3074         else
3075                 process_func = btrfs_free_extent;
3076
3077         if (full_backref)
3078                 parent = buf->start;
3079         else
3080                 parent = 0;
3081
3082         for (i = 0; i < nritems; i++) {
3083                 if (level == 0) {
3084                         btrfs_item_key_to_cpu(buf, &key, i);
3085                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3086                                 continue;
3087                         fi = btrfs_item_ptr(buf, i,
3088                                             struct btrfs_file_extent_item);
3089                         if (btrfs_file_extent_type(buf, fi) ==
3090                             BTRFS_FILE_EXTENT_INLINE)
3091                                 continue;
3092                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3093                         if (bytenr == 0)
3094                                 continue;
3095
3096                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3097                         key.offset -= btrfs_file_extent_offset(buf, fi);
3098                         ret = process_func(trans, root, bytenr, num_bytes,
3099                                            parent, ref_root, key.objectid,
3100                                            key.offset, 1);
3101                         if (ret)
3102                                 goto fail;
3103                 } else {
3104                         bytenr = btrfs_node_blockptr(buf, i);
3105                         num_bytes = root->nodesize;
3106                         ret = process_func(trans, root, bytenr, num_bytes,
3107                                            parent, ref_root, level - 1, 0,
3108                                            1);
3109                         if (ret)
3110                                 goto fail;
3111                 }
3112         }
3113         return 0;
3114 fail:
3115         return ret;
3116 }
3117
3118 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3119                   struct extent_buffer *buf, int full_backref)
3120 {
3121         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3122 }
3123
3124 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3125                   struct extent_buffer *buf, int full_backref)
3126 {
3127         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3128 }
3129
3130 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3131                                  struct btrfs_root *root,
3132                                  struct btrfs_path *path,
3133                                  struct btrfs_block_group_cache *cache)
3134 {
3135         int ret;
3136         struct btrfs_root *extent_root = root->fs_info->extent_root;
3137         unsigned long bi;
3138         struct extent_buffer *leaf;
3139
3140         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3141         if (ret) {
3142                 if (ret > 0)
3143                         ret = -ENOENT;
3144                 goto fail;
3145         }
3146
3147         leaf = path->nodes[0];
3148         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3149         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3150         btrfs_mark_buffer_dirty(leaf);
3151 fail:
3152         btrfs_release_path(path);
3153         return ret;
3154
3155 }
3156
3157 static struct btrfs_block_group_cache *
3158 next_block_group(struct btrfs_root *root,
3159                  struct btrfs_block_group_cache *cache)
3160 {
3161         struct rb_node *node;
3162
3163         spin_lock(&root->fs_info->block_group_cache_lock);
3164
3165         /* If our block group was removed, we need a full search. */
3166         if (RB_EMPTY_NODE(&cache->cache_node)) {
3167                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3168
3169                 spin_unlock(&root->fs_info->block_group_cache_lock);
3170                 btrfs_put_block_group(cache);
3171                 cache = btrfs_lookup_first_block_group(root->fs_info,
3172                                                        next_bytenr);
3173                 return cache;
3174         }
3175         node = rb_next(&cache->cache_node);
3176         btrfs_put_block_group(cache);
3177         if (node) {
3178                 cache = rb_entry(node, struct btrfs_block_group_cache,
3179                                  cache_node);
3180                 btrfs_get_block_group(cache);
3181         } else
3182                 cache = NULL;
3183         spin_unlock(&root->fs_info->block_group_cache_lock);
3184         return cache;
3185 }
3186
3187 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3188                             struct btrfs_trans_handle *trans,
3189                             struct btrfs_path *path)
3190 {
3191         struct btrfs_root *root = block_group->fs_info->tree_root;
3192         struct inode *inode = NULL;
3193         u64 alloc_hint = 0;
3194         int dcs = BTRFS_DC_ERROR;
3195         u64 num_pages = 0;
3196         int retries = 0;
3197         int ret = 0;
3198
3199         /*
3200          * If this block group is smaller than 100 megs don't bother caching the
3201          * block group.
3202          */
3203         if (block_group->key.offset < (100 * 1024 * 1024)) {
3204                 spin_lock(&block_group->lock);
3205                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3206                 spin_unlock(&block_group->lock);
3207                 return 0;
3208         }
3209
3210         if (trans->aborted)
3211                 return 0;
3212 again:
3213         inode = lookup_free_space_inode(root, block_group, path);
3214         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3215                 ret = PTR_ERR(inode);
3216                 btrfs_release_path(path);
3217                 goto out;
3218         }
3219
3220         if (IS_ERR(inode)) {
3221                 BUG_ON(retries);
3222                 retries++;
3223
3224                 if (block_group->ro)
3225                         goto out_free;
3226
3227                 ret = create_free_space_inode(root, trans, block_group, path);
3228                 if (ret)
3229                         goto out_free;
3230                 goto again;
3231         }
3232
3233         /* We've already setup this transaction, go ahead and exit */
3234         if (block_group->cache_generation == trans->transid &&
3235             i_size_read(inode)) {
3236                 dcs = BTRFS_DC_SETUP;
3237                 goto out_put;
3238         }
3239
3240         /*
3241          * We want to set the generation to 0, that way if anything goes wrong
3242          * from here on out we know not to trust this cache when we load up next
3243          * time.
3244          */
3245         BTRFS_I(inode)->generation = 0;
3246         ret = btrfs_update_inode(trans, root, inode);
3247         if (ret) {
3248                 /*
3249                  * So theoretically we could recover from this, simply set the
3250                  * super cache generation to 0 so we know to invalidate the
3251                  * cache, but then we'd have to keep track of the block groups
3252                  * that fail this way so we know we _have_ to reset this cache
3253                  * before the next commit or risk reading stale cache.  So to
3254                  * limit our exposure to horrible edge cases lets just abort the
3255                  * transaction, this only happens in really bad situations
3256                  * anyway.
3257                  */
3258                 btrfs_abort_transaction(trans, root, ret);
3259                 goto out_put;
3260         }
3261         WARN_ON(ret);
3262
3263         if (i_size_read(inode) > 0) {
3264                 ret = btrfs_check_trunc_cache_free_space(root,
3265                                         &root->fs_info->global_block_rsv);
3266                 if (ret)
3267                         goto out_put;
3268
3269                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3270                 if (ret)
3271                         goto out_put;
3272         }
3273
3274         spin_lock(&block_group->lock);
3275         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3276             !btrfs_test_opt(root, SPACE_CACHE)) {
3277                 /*
3278                  * don't bother trying to write stuff out _if_
3279                  * a) we're not cached,
3280                  * b) we're with nospace_cache mount option.
3281                  */
3282                 dcs = BTRFS_DC_WRITTEN;
3283                 spin_unlock(&block_group->lock);
3284                 goto out_put;
3285         }
3286         spin_unlock(&block_group->lock);
3287
3288         /*
3289          * Try to preallocate enough space based on how big the block group is.
3290          * Keep in mind this has to include any pinned space which could end up
3291          * taking up quite a bit since it's not folded into the other space
3292          * cache.
3293          */
3294         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3295         if (!num_pages)
3296                 num_pages = 1;
3297
3298         num_pages *= 16;
3299         num_pages *= PAGE_CACHE_SIZE;
3300
3301         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3302         if (ret)
3303                 goto out_put;
3304
3305         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3306                                               num_pages, num_pages,
3307                                               &alloc_hint);
3308         if (!ret)
3309                 dcs = BTRFS_DC_SETUP;
3310         btrfs_free_reserved_data_space(inode, num_pages);
3311
3312 out_put:
3313         iput(inode);
3314 out_free:
3315         btrfs_release_path(path);
3316 out:
3317         spin_lock(&block_group->lock);
3318         if (!ret && dcs == BTRFS_DC_SETUP)
3319                 block_group->cache_generation = trans->transid;
3320         block_group->disk_cache_state = dcs;
3321         spin_unlock(&block_group->lock);
3322
3323         return ret;
3324 }
3325
3326 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3327                             struct btrfs_root *root)
3328 {
3329         struct btrfs_block_group_cache *cache, *tmp;
3330         struct btrfs_transaction *cur_trans = trans->transaction;
3331         struct btrfs_path *path;
3332
3333         if (list_empty(&cur_trans->dirty_bgs) ||
3334             !btrfs_test_opt(root, SPACE_CACHE))
3335                 return 0;
3336
3337         path = btrfs_alloc_path();
3338         if (!path)
3339                 return -ENOMEM;
3340
3341         /* Could add new block groups, use _safe just in case */
3342         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3343                                  dirty_list) {
3344                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3345                         cache_save_setup(cache, trans, path);
3346         }
3347
3348         btrfs_free_path(path);
3349         return 0;
3350 }
3351
3352 /*
3353  * transaction commit does final block group cache writeback during a
3354  * critical section where nothing is allowed to change the FS.  This is
3355  * required in order for the cache to actually match the block group,
3356  * but can introduce a lot of latency into the commit.
3357  *
3358  * So, btrfs_start_dirty_block_groups is here to kick off block group
3359  * cache IO.  There's a chance we'll have to redo some of it if the
3360  * block group changes again during the commit, but it greatly reduces
3361  * the commit latency by getting rid of the easy block groups while
3362  * we're still allowing others to join the commit.
3363  */
3364 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3365                                    struct btrfs_root *root)
3366 {
3367         struct btrfs_block_group_cache *cache;
3368         struct btrfs_transaction *cur_trans = trans->transaction;
3369         int ret = 0;
3370         int should_put;
3371         struct btrfs_path *path = NULL;
3372         LIST_HEAD(dirty);
3373         struct list_head *io = &cur_trans->io_bgs;
3374         int num_started = 0;
3375         int loops = 0;
3376
3377         spin_lock(&cur_trans->dirty_bgs_lock);
3378         if (list_empty(&cur_trans->dirty_bgs)) {
3379                 spin_unlock(&cur_trans->dirty_bgs_lock);
3380                 return 0;
3381         }
3382         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3383         spin_unlock(&cur_trans->dirty_bgs_lock);
3384
3385 again:
3386         /*
3387          * make sure all the block groups on our dirty list actually
3388          * exist
3389          */
3390         btrfs_create_pending_block_groups(trans, root);
3391
3392         if (!path) {
3393                 path = btrfs_alloc_path();
3394                 if (!path)
3395                         return -ENOMEM;
3396         }
3397
3398         /*
3399          * cache_write_mutex is here only to save us from balance or automatic
3400          * removal of empty block groups deleting this block group while we are
3401          * writing out the cache
3402          */
3403         mutex_lock(&trans->transaction->cache_write_mutex);
3404         while (!list_empty(&dirty)) {
3405                 cache = list_first_entry(&dirty,
3406                                          struct btrfs_block_group_cache,
3407                                          dirty_list);
3408                 /*
3409                  * this can happen if something re-dirties a block
3410                  * group that is already under IO.  Just wait for it to
3411                  * finish and then do it all again
3412                  */
3413                 if (!list_empty(&cache->io_list)) {
3414                         list_del_init(&cache->io_list);
3415                         btrfs_wait_cache_io(root, trans, cache,
3416                                             &cache->io_ctl, path,
3417                                             cache->key.objectid);
3418                         btrfs_put_block_group(cache);
3419                 }
3420
3421
3422                 /*
3423                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3424                  * if it should update the cache_state.  Don't delete
3425                  * until after we wait.
3426                  *
3427                  * Since we're not running in the commit critical section
3428                  * we need the dirty_bgs_lock to protect from update_block_group
3429                  */
3430                 spin_lock(&cur_trans->dirty_bgs_lock);
3431                 list_del_init(&cache->dirty_list);
3432                 spin_unlock(&cur_trans->dirty_bgs_lock);
3433
3434                 should_put = 1;
3435
3436                 cache_save_setup(cache, trans, path);
3437
3438                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3439                         cache->io_ctl.inode = NULL;
3440                         ret = btrfs_write_out_cache(root, trans, cache, path);
3441                         if (ret == 0 && cache->io_ctl.inode) {
3442                                 num_started++;
3443                                 should_put = 0;
3444
3445                                 /*
3446                                  * the cache_write_mutex is protecting
3447                                  * the io_list
3448                                  */
3449                                 list_add_tail(&cache->io_list, io);
3450                         } else {
3451                                 /*
3452                                  * if we failed to write the cache, the
3453                                  * generation will be bad and life goes on
3454                                  */
3455                                 ret = 0;
3456                         }
3457                 }
3458                 if (!ret) {
3459                         ret = write_one_cache_group(trans, root, path, cache);
3460                         /*
3461                          * Our block group might still be attached to the list
3462                          * of new block groups in the transaction handle of some
3463                          * other task (struct btrfs_trans_handle->new_bgs). This
3464                          * means its block group item isn't yet in the extent
3465                          * tree. If this happens ignore the error, as we will
3466                          * try again later in the critical section of the
3467                          * transaction commit.
3468                          */
3469                         if (ret == -ENOENT) {
3470                                 ret = 0;
3471                                 spin_lock(&cur_trans->dirty_bgs_lock);
3472                                 if (list_empty(&cache->dirty_list)) {
3473                                         list_add_tail(&cache->dirty_list,
3474                                                       &cur_trans->dirty_bgs);
3475                                         btrfs_get_block_group(cache);
3476                                 }
3477                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3478                         } else if (ret) {
3479                                 btrfs_abort_transaction(trans, root, ret);
3480                         }
3481                 }
3482
3483                 /* if its not on the io list, we need to put the block group */
3484                 if (should_put)
3485                         btrfs_put_block_group(cache);
3486
3487                 if (ret)
3488                         break;
3489
3490                 /*
3491                  * Avoid blocking other tasks for too long. It might even save
3492                  * us from writing caches for block groups that are going to be
3493                  * removed.
3494                  */
3495                 mutex_unlock(&trans->transaction->cache_write_mutex);
3496                 mutex_lock(&trans->transaction->cache_write_mutex);
3497         }
3498         mutex_unlock(&trans->transaction->cache_write_mutex);
3499
3500         /*
3501          * go through delayed refs for all the stuff we've just kicked off
3502          * and then loop back (just once)
3503          */
3504         ret = btrfs_run_delayed_refs(trans, root, 0);
3505         if (!ret && loops == 0) {
3506                 loops++;
3507                 spin_lock(&cur_trans->dirty_bgs_lock);
3508                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3509                 /*
3510                  * dirty_bgs_lock protects us from concurrent block group
3511                  * deletes too (not just cache_write_mutex).
3512                  */
3513                 if (!list_empty(&dirty)) {
3514                         spin_unlock(&cur_trans->dirty_bgs_lock);
3515                         goto again;
3516                 }
3517                 spin_unlock(&cur_trans->dirty_bgs_lock);
3518         }
3519
3520         btrfs_free_path(path);
3521         return ret;
3522 }
3523
3524 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3525                                    struct btrfs_root *root)
3526 {
3527         struct btrfs_block_group_cache *cache;
3528         struct btrfs_transaction *cur_trans = trans->transaction;
3529         int ret = 0;
3530         int should_put;
3531         struct btrfs_path *path;
3532         struct list_head *io = &cur_trans->io_bgs;
3533         int num_started = 0;
3534
3535         path = btrfs_alloc_path();
3536         if (!path)
3537                 return -ENOMEM;
3538
3539         /*
3540          * We don't need the lock here since we are protected by the transaction
3541          * commit.  We want to do the cache_save_setup first and then run the
3542          * delayed refs to make sure we have the best chance at doing this all
3543          * in one shot.
3544          */
3545         while (!list_empty(&cur_trans->dirty_bgs)) {
3546                 cache = list_first_entry(&cur_trans->dirty_bgs,
3547                                          struct btrfs_block_group_cache,
3548                                          dirty_list);
3549
3550                 /*
3551                  * this can happen if cache_save_setup re-dirties a block
3552                  * group that is already under IO.  Just wait for it to
3553                  * finish and then do it all again
3554                  */
3555                 if (!list_empty(&cache->io_list)) {
3556                         list_del_init(&cache->io_list);
3557                         btrfs_wait_cache_io(root, trans, cache,
3558                                             &cache->io_ctl, path,
3559                                             cache->key.objectid);
3560                         btrfs_put_block_group(cache);
3561                 }
3562
3563                 /*
3564                  * don't remove from the dirty list until after we've waited
3565                  * on any pending IO
3566                  */
3567                 list_del_init(&cache->dirty_list);
3568                 should_put = 1;
3569
3570                 cache_save_setup(cache, trans, path);
3571
3572                 if (!ret)
3573                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3574
3575                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3576                         cache->io_ctl.inode = NULL;
3577                         ret = btrfs_write_out_cache(root, trans, cache, path);
3578                         if (ret == 0 && cache->io_ctl.inode) {
3579                                 num_started++;
3580                                 should_put = 0;
3581                                 list_add_tail(&cache->io_list, io);
3582                         } else {
3583                                 /*
3584                                  * if we failed to write the cache, the
3585                                  * generation will be bad and life goes on
3586                                  */
3587                                 ret = 0;
3588                         }
3589                 }
3590                 if (!ret) {
3591                         ret = write_one_cache_group(trans, root, path, cache);
3592                         if (ret)
3593                                 btrfs_abort_transaction(trans, root, ret);
3594                 }
3595
3596                 /* if its not on the io list, we need to put the block group */
3597                 if (should_put)
3598                         btrfs_put_block_group(cache);
3599         }
3600
3601         while (!list_empty(io)) {
3602                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3603                                          io_list);
3604                 list_del_init(&cache->io_list);
3605                 btrfs_wait_cache_io(root, trans, cache,
3606                                     &cache->io_ctl, path, cache->key.objectid);
3607                 btrfs_put_block_group(cache);
3608         }
3609
3610         btrfs_free_path(path);
3611         return ret;
3612 }
3613
3614 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3615 {
3616         struct btrfs_block_group_cache *block_group;
3617         int readonly = 0;
3618
3619         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3620         if (!block_group || block_group->ro)
3621                 readonly = 1;
3622         if (block_group)
3623                 btrfs_put_block_group(block_group);
3624         return readonly;
3625 }
3626
3627 static const char *alloc_name(u64 flags)
3628 {
3629         switch (flags) {
3630         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3631                 return "mixed";
3632         case BTRFS_BLOCK_GROUP_METADATA:
3633                 return "metadata";
3634         case BTRFS_BLOCK_GROUP_DATA:
3635                 return "data";
3636         case BTRFS_BLOCK_GROUP_SYSTEM:
3637                 return "system";
3638         default:
3639                 WARN_ON(1);
3640                 return "invalid-combination";
3641         };
3642 }
3643
3644 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3645                              u64 total_bytes, u64 bytes_used,
3646                              struct btrfs_space_info **space_info)
3647 {
3648         struct btrfs_space_info *found;
3649         int i;
3650         int factor;
3651         int ret;
3652
3653         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3654                      BTRFS_BLOCK_GROUP_RAID10))
3655                 factor = 2;
3656         else
3657                 factor = 1;
3658
3659         found = __find_space_info(info, flags);
3660         if (found) {
3661                 spin_lock(&found->lock);
3662                 found->total_bytes += total_bytes;
3663                 found->disk_total += total_bytes * factor;
3664                 found->bytes_used += bytes_used;
3665                 found->disk_used += bytes_used * factor;
3666                 if (total_bytes > 0)
3667                         found->full = 0;
3668                 spin_unlock(&found->lock);
3669                 *space_info = found;
3670                 return 0;
3671         }
3672         found = kzalloc(sizeof(*found), GFP_NOFS);
3673         if (!found)
3674                 return -ENOMEM;
3675
3676         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3677         if (ret) {
3678                 kfree(found);
3679                 return ret;
3680         }
3681
3682         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3683                 INIT_LIST_HEAD(&found->block_groups[i]);
3684         init_rwsem(&found->groups_sem);
3685         spin_lock_init(&found->lock);
3686         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3687         found->total_bytes = total_bytes;
3688         found->disk_total = total_bytes * factor;
3689         found->bytes_used = bytes_used;
3690         found->disk_used = bytes_used * factor;
3691         found->bytes_pinned = 0;
3692         found->bytes_reserved = 0;
3693         found->bytes_readonly = 0;
3694         found->bytes_may_use = 0;
3695         if (total_bytes > 0)
3696                 found->full = 0;
3697         else
3698                 found->full = 1;
3699         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3700         found->chunk_alloc = 0;
3701         found->flush = 0;
3702         init_waitqueue_head(&found->wait);
3703         INIT_LIST_HEAD(&found->ro_bgs);
3704
3705         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3706                                     info->space_info_kobj, "%s",
3707                                     alloc_name(found->flags));
3708         if (ret) {
3709                 kfree(found);
3710                 return ret;
3711         }
3712
3713         *space_info = found;
3714         list_add_rcu(&found->list, &info->space_info);
3715         if (flags & BTRFS_BLOCK_GROUP_DATA)
3716                 info->data_sinfo = found;
3717
3718         return ret;
3719 }
3720
3721 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3722 {
3723         u64 extra_flags = chunk_to_extended(flags) &
3724                                 BTRFS_EXTENDED_PROFILE_MASK;
3725
3726         write_seqlock(&fs_info->profiles_lock);
3727         if (flags & BTRFS_BLOCK_GROUP_DATA)
3728                 fs_info->avail_data_alloc_bits |= extra_flags;
3729         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3730                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3731         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3732                 fs_info->avail_system_alloc_bits |= extra_flags;
3733         write_sequnlock(&fs_info->profiles_lock);
3734 }
3735
3736 /*
3737  * returns target flags in extended format or 0 if restripe for this
3738  * chunk_type is not in progress
3739  *
3740  * should be called with either volume_mutex or balance_lock held
3741  */
3742 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3743 {
3744         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3745         u64 target = 0;
3746
3747         if (!bctl)
3748                 return 0;
3749
3750         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3751             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3752                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3753         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3754                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3755                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3756         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3757                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3758                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3759         }
3760
3761         return target;
3762 }
3763
3764 /*
3765  * @flags: available profiles in extended format (see ctree.h)
3766  *
3767  * Returns reduced profile in chunk format.  If profile changing is in
3768  * progress (either running or paused) picks the target profile (if it's
3769  * already available), otherwise falls back to plain reducing.
3770  */
3771 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3772 {
3773         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3774         u64 target;
3775         u64 tmp;
3776
3777         /*
3778          * see if restripe for this chunk_type is in progress, if so
3779          * try to reduce to the target profile
3780          */
3781         spin_lock(&root->fs_info->balance_lock);
3782         target = get_restripe_target(root->fs_info, flags);
3783         if (target) {
3784                 /* pick target profile only if it's already available */
3785                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3786                         spin_unlock(&root->fs_info->balance_lock);
3787                         return extended_to_chunk(target);
3788                 }
3789         }
3790         spin_unlock(&root->fs_info->balance_lock);
3791
3792         /* First, mask out the RAID levels which aren't possible */
3793         if (num_devices == 1)
3794                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3795                            BTRFS_BLOCK_GROUP_RAID5);
3796         if (num_devices < 3)
3797                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3798         if (num_devices < 4)
3799                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3800
3801         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3802                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3803                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3804         flags &= ~tmp;
3805
3806         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3807                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3808         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3809                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3810         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3811                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3812         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3813                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3814         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3815                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3816
3817         return extended_to_chunk(flags | tmp);
3818 }
3819
3820 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3821 {
3822         unsigned seq;
3823         u64 flags;
3824
3825         do {
3826                 flags = orig_flags;
3827                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3828
3829                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3830                         flags |= root->fs_info->avail_data_alloc_bits;
3831                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3832                         flags |= root->fs_info->avail_system_alloc_bits;
3833                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3834                         flags |= root->fs_info->avail_metadata_alloc_bits;
3835         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3836
3837         return btrfs_reduce_alloc_profile(root, flags);
3838 }
3839
3840 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3841 {
3842         u64 flags;
3843         u64 ret;
3844
3845         if (data)
3846                 flags = BTRFS_BLOCK_GROUP_DATA;
3847         else if (root == root->fs_info->chunk_root)
3848                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3849         else
3850                 flags = BTRFS_BLOCK_GROUP_METADATA;
3851
3852         ret = get_alloc_profile(root, flags);
3853         return ret;
3854 }
3855
3856 /*
3857  * This will check the space that the inode allocates from to make sure we have
3858  * enough space for bytes.
3859  */
3860 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3861 {
3862         struct btrfs_space_info *data_sinfo;
3863         struct btrfs_root *root = BTRFS_I(inode)->root;
3864         struct btrfs_fs_info *fs_info = root->fs_info;
3865         u64 used;
3866         int ret = 0;
3867         int need_commit = 2;
3868         int have_pinned_space;
3869
3870         /* make sure bytes are sectorsize aligned */
3871         bytes = ALIGN(bytes, root->sectorsize);
3872
3873         if (btrfs_is_free_space_inode(inode)) {
3874                 need_commit = 0;
3875                 ASSERT(current->journal_info);
3876         }
3877
3878         data_sinfo = fs_info->data_sinfo;
3879         if (!data_sinfo)
3880                 goto alloc;
3881
3882 again:
3883         /* make sure we have enough space to handle the data first */
3884         spin_lock(&data_sinfo->lock);
3885         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3886                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3887                 data_sinfo->bytes_may_use;
3888
3889         if (used + bytes > data_sinfo->total_bytes) {
3890                 struct btrfs_trans_handle *trans;
3891
3892                 /*
3893                  * if we don't have enough free bytes in this space then we need
3894                  * to alloc a new chunk.
3895                  */
3896                 if (!data_sinfo->full) {
3897                         u64 alloc_target;
3898
3899                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3900                         spin_unlock(&data_sinfo->lock);
3901 alloc:
3902                         alloc_target = btrfs_get_alloc_profile(root, 1);
3903                         /*
3904                          * It is ugly that we don't call nolock join
3905                          * transaction for the free space inode case here.
3906                          * But it is safe because we only do the data space
3907                          * reservation for the free space cache in the
3908                          * transaction context, the common join transaction
3909                          * just increase the counter of the current transaction
3910                          * handler, doesn't try to acquire the trans_lock of
3911                          * the fs.
3912                          */
3913                         trans = btrfs_join_transaction(root);
3914                         if (IS_ERR(trans))
3915                                 return PTR_ERR(trans);
3916
3917                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3918                                              alloc_target,
3919                                              CHUNK_ALLOC_NO_FORCE);
3920                         btrfs_end_transaction(trans, root);
3921                         if (ret < 0) {
3922                                 if (ret != -ENOSPC)
3923                                         return ret;
3924                                 else {
3925                                         have_pinned_space = 1;
3926                                         goto commit_trans;
3927                                 }
3928                         }
3929
3930                         if (!data_sinfo)
3931                                 data_sinfo = fs_info->data_sinfo;
3932
3933                         goto again;
3934                 }
3935
3936                 /*
3937                  * If we don't have enough pinned space to deal with this
3938                  * allocation, and no removed chunk in current transaction,
3939                  * don't bother committing the transaction.
3940                  */
3941                 have_pinned_space = percpu_counter_compare(
3942                         &data_sinfo->total_bytes_pinned,
3943                         used + bytes - data_sinfo->total_bytes);
3944                 spin_unlock(&data_sinfo->lock);
3945
3946                 /* commit the current transaction and try again */
3947 commit_trans:
3948                 if (need_commit &&
3949                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3950                         need_commit--;
3951
3952                         if (need_commit > 0)
3953                                 btrfs_wait_ordered_roots(fs_info, -1);
3954
3955                         trans = btrfs_join_transaction(root);
3956                         if (IS_ERR(trans))
3957                                 return PTR_ERR(trans);
3958                         if (have_pinned_space >= 0 ||
3959                             trans->transaction->have_free_bgs ||
3960                             need_commit > 0) {
3961                                 ret = btrfs_commit_transaction(trans, root);
3962                                 if (ret)
3963                                         return ret;
3964                                 /*
3965                                  * make sure that all running delayed iput are
3966                                  * done
3967                                  */
3968                                 down_write(&root->fs_info->delayed_iput_sem);
3969                                 up_write(&root->fs_info->delayed_iput_sem);
3970                                 goto again;
3971                         } else {
3972                                 btrfs_end_transaction(trans, root);
3973                         }
3974                 }
3975
3976                 trace_btrfs_space_reservation(root->fs_info,
3977                                               "space_info:enospc",
3978                                               data_sinfo->flags, bytes, 1);
3979                 return -ENOSPC;
3980         }
3981         ret = btrfs_qgroup_reserve(root, write_bytes);
3982         if (ret)
3983                 goto out;
3984         data_sinfo->bytes_may_use += bytes;
3985         trace_btrfs_space_reservation(root->fs_info, "space_info",
3986                                       data_sinfo->flags, bytes, 1);
3987 out:
3988         spin_unlock(&data_sinfo->lock);
3989
3990         return ret;
3991 }
3992
3993 /*
3994  * Called if we need to clear a data reservation for this inode.
3995  */
3996 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3997 {
3998         struct btrfs_root *root = BTRFS_I(inode)->root;
3999         struct btrfs_space_info *data_sinfo;
4000
4001         /* make sure bytes are sectorsize aligned */
4002         bytes = ALIGN(bytes, root->sectorsize);
4003
4004         data_sinfo = root->fs_info->data_sinfo;
4005         spin_lock(&data_sinfo->lock);
4006         WARN_ON(data_sinfo->bytes_may_use < bytes);
4007         data_sinfo->bytes_may_use -= bytes;
4008         trace_btrfs_space_reservation(root->fs_info, "space_info",
4009                                       data_sinfo->flags, bytes, 0);
4010         spin_unlock(&data_sinfo->lock);
4011 }
4012
4013 static void force_metadata_allocation(struct btrfs_fs_info *info)
4014 {
4015         struct list_head *head = &info->space_info;
4016         struct btrfs_space_info *found;
4017
4018         rcu_read_lock();
4019         list_for_each_entry_rcu(found, head, list) {
4020                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4021                         found->force_alloc = CHUNK_ALLOC_FORCE;
4022         }
4023         rcu_read_unlock();
4024 }
4025
4026 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4027 {
4028         return (global->size << 1);
4029 }
4030
4031 static int should_alloc_chunk(struct btrfs_root *root,
4032                               struct btrfs_space_info *sinfo, int force)
4033 {
4034         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4035         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4036         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4037         u64 thresh;
4038
4039         if (force == CHUNK_ALLOC_FORCE)
4040                 return 1;
4041
4042         /*
4043          * We need to take into account the global rsv because for all intents
4044          * and purposes it's used space.  Don't worry about locking the
4045          * global_rsv, it doesn't change except when the transaction commits.
4046          */
4047         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4048                 num_allocated += calc_global_rsv_need_space(global_rsv);
4049
4050         /*
4051          * in limited mode, we want to have some free space up to
4052          * about 1% of the FS size.
4053          */
4054         if (force == CHUNK_ALLOC_LIMITED) {
4055                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4056                 thresh = max_t(u64, 64 * 1024 * 1024,
4057                                div_factor_fine(thresh, 1));
4058
4059                 if (num_bytes - num_allocated < thresh)
4060                         return 1;
4061         }
4062
4063         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4064                 return 0;
4065         return 1;
4066 }
4067
4068 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4069 {
4070         u64 num_dev;
4071
4072         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4073                     BTRFS_BLOCK_GROUP_RAID0 |
4074                     BTRFS_BLOCK_GROUP_RAID5 |
4075                     BTRFS_BLOCK_GROUP_RAID6))
4076                 num_dev = root->fs_info->fs_devices->rw_devices;
4077         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4078                 num_dev = 2;
4079         else
4080                 num_dev = 1;    /* DUP or single */
4081
4082         return num_dev;
4083 }
4084
4085 /*
4086  * If @is_allocation is true, reserve space in the system space info necessary
4087  * for allocating a chunk, otherwise if it's false, reserve space necessary for
4088  * removing a chunk.
4089  */
4090 void check_system_chunk(struct btrfs_trans_handle *trans,
4091                         struct btrfs_root *root,
4092                         u64 type)
4093 {
4094         struct btrfs_space_info *info;
4095         u64 left;
4096         u64 thresh;
4097         int ret = 0;
4098         u64 num_devs;
4099
4100         /*
4101          * Needed because we can end up allocating a system chunk and for an
4102          * atomic and race free space reservation in the chunk block reserve.
4103          */
4104         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4105
4106         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4107         spin_lock(&info->lock);
4108         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4109                 info->bytes_reserved - info->bytes_readonly -
4110                 info->bytes_may_use;
4111         spin_unlock(&info->lock);
4112
4113         num_devs = get_profile_num_devs(root, type);
4114
4115         /* num_devs device items to update and 1 chunk item to add or remove */
4116         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4117                 btrfs_calc_trans_metadata_size(root, 1);
4118
4119         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4120                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4121                         left, thresh, type);
4122                 dump_space_info(info, 0, 0);
4123         }
4124
4125         if (left < thresh) {
4126                 u64 flags;
4127
4128                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4129                 /*
4130                  * Ignore failure to create system chunk. We might end up not
4131                  * needing it, as we might not need to COW all nodes/leafs from
4132                  * the paths we visit in the chunk tree (they were already COWed
4133                  * or created in the current transaction for example).
4134                  */
4135                 ret = btrfs_alloc_chunk(trans, root, flags);
4136         }
4137
4138         if (!ret) {
4139                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4140                                           &root->fs_info->chunk_block_rsv,
4141                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4142                 if (!ret)
4143                         trans->chunk_bytes_reserved += thresh;
4144         }
4145 }
4146
4147 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4148                           struct btrfs_root *extent_root, u64 flags, int force)
4149 {
4150         struct btrfs_space_info *space_info;
4151         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4152         int wait_for_alloc = 0;
4153         int ret = 0;
4154
4155         /* Don't re-enter if we're already allocating a chunk */
4156         if (trans->allocating_chunk)
4157                 return -ENOSPC;
4158
4159         space_info = __find_space_info(extent_root->fs_info, flags);
4160         if (!space_info) {
4161                 ret = update_space_info(extent_root->fs_info, flags,
4162                                         0, 0, &space_info);
4163                 BUG_ON(ret); /* -ENOMEM */
4164         }
4165         BUG_ON(!space_info); /* Logic error */
4166
4167 again:
4168         spin_lock(&space_info->lock);
4169         if (force < space_info->force_alloc)
4170                 force = space_info->force_alloc;
4171         if (space_info->full) {
4172                 if (should_alloc_chunk(extent_root, space_info, force))
4173                         ret = -ENOSPC;
4174                 else
4175                         ret = 0;
4176                 spin_unlock(&space_info->lock);
4177                 return ret;
4178         }
4179
4180         if (!should_alloc_chunk(extent_root, space_info, force)) {
4181                 spin_unlock(&space_info->lock);
4182                 return 0;
4183         } else if (space_info->chunk_alloc) {
4184                 wait_for_alloc = 1;
4185         } else {
4186                 space_info->chunk_alloc = 1;
4187         }
4188
4189         spin_unlock(&space_info->lock);
4190
4191         mutex_lock(&fs_info->chunk_mutex);
4192
4193         /*
4194          * The chunk_mutex is held throughout the entirety of a chunk
4195          * allocation, so once we've acquired the chunk_mutex we know that the
4196          * other guy is done and we need to recheck and see if we should
4197          * allocate.
4198          */
4199         if (wait_for_alloc) {
4200                 mutex_unlock(&fs_info->chunk_mutex);
4201                 wait_for_alloc = 0;
4202                 goto again;
4203         }
4204
4205         trans->allocating_chunk = true;
4206
4207         /*
4208          * If we have mixed data/metadata chunks we want to make sure we keep
4209          * allocating mixed chunks instead of individual chunks.
4210          */
4211         if (btrfs_mixed_space_info(space_info))
4212                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4213
4214         /*
4215          * if we're doing a data chunk, go ahead and make sure that
4216          * we keep a reasonable number of metadata chunks allocated in the
4217          * FS as well.
4218          */
4219         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4220                 fs_info->data_chunk_allocations++;
4221                 if (!(fs_info->data_chunk_allocations %
4222                       fs_info->metadata_ratio))
4223                         force_metadata_allocation(fs_info);
4224         }
4225
4226         /*
4227          * Check if we have enough space in SYSTEM chunk because we may need
4228          * to update devices.
4229          */
4230         check_system_chunk(trans, extent_root, flags);
4231
4232         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4233         trans->allocating_chunk = false;
4234
4235         spin_lock(&space_info->lock);
4236         if (ret < 0 && ret != -ENOSPC)
4237                 goto out;
4238         if (ret)
4239                 space_info->full = 1;
4240         else
4241                 ret = 1;
4242
4243         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4244 out:
4245         space_info->chunk_alloc = 0;
4246         spin_unlock(&space_info->lock);
4247         mutex_unlock(&fs_info->chunk_mutex);
4248         /*
4249          * When we allocate a new chunk we reserve space in the chunk block
4250          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4251          * add new nodes/leafs to it if we end up needing to do it when
4252          * inserting the chunk item and updating device items as part of the
4253          * second phase of chunk allocation, performed by
4254          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4255          * large number of new block groups to create in our transaction
4256          * handle's new_bgs list to avoid exhausting the chunk block reserve
4257          * in extreme cases - like having a single transaction create many new
4258          * block groups when starting to write out the free space caches of all
4259          * the block groups that were made dirty during the lifetime of the
4260          * transaction.
4261          */
4262         if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4263                 btrfs_create_pending_block_groups(trans, trans->root);
4264                 btrfs_trans_release_chunk_metadata(trans);
4265         }
4266         return ret;
4267 }
4268
4269 static int can_overcommit(struct btrfs_root *root,
4270                           struct btrfs_space_info *space_info, u64 bytes,
4271                           enum btrfs_reserve_flush_enum flush)
4272 {
4273         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4274         u64 profile = btrfs_get_alloc_profile(root, 0);
4275         u64 space_size;
4276         u64 avail;
4277         u64 used;
4278
4279         used = space_info->bytes_used + space_info->bytes_reserved +
4280                 space_info->bytes_pinned + space_info->bytes_readonly;
4281
4282         /*
4283          * We only want to allow over committing if we have lots of actual space
4284          * free, but if we don't have enough space to handle the global reserve
4285          * space then we could end up having a real enospc problem when trying
4286          * to allocate a chunk or some other such important allocation.
4287          */
4288         spin_lock(&global_rsv->lock);
4289         space_size = calc_global_rsv_need_space(global_rsv);
4290         spin_unlock(&global_rsv->lock);
4291         if (used + space_size >= space_info->total_bytes)
4292                 return 0;
4293
4294         used += space_info->bytes_may_use;
4295
4296         spin_lock(&root->fs_info->free_chunk_lock);
4297         avail = root->fs_info->free_chunk_space;
4298         spin_unlock(&root->fs_info->free_chunk_lock);
4299
4300         /*
4301          * If we have dup, raid1 or raid10 then only half of the free
4302          * space is actually useable.  For raid56, the space info used
4303          * doesn't include the parity drive, so we don't have to
4304          * change the math
4305          */
4306         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4307                        BTRFS_BLOCK_GROUP_RAID1 |
4308                        BTRFS_BLOCK_GROUP_RAID10))
4309                 avail >>= 1;
4310
4311         /*
4312          * If we aren't flushing all things, let us overcommit up to
4313          * 1/2th of the space. If we can flush, don't let us overcommit
4314          * too much, let it overcommit up to 1/8 of the space.
4315          */
4316         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4317                 avail >>= 3;
4318         else
4319                 avail >>= 1;
4320
4321         if (used + bytes < space_info->total_bytes + avail)
4322                 return 1;
4323         return 0;
4324 }
4325
4326 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4327                                          unsigned long nr_pages, int nr_items)
4328 {
4329         struct super_block *sb = root->fs_info->sb;
4330
4331         if (down_read_trylock(&sb->s_umount)) {
4332                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4333                 up_read(&sb->s_umount);
4334         } else {
4335                 /*
4336                  * We needn't worry the filesystem going from r/w to r/o though
4337                  * we don't acquire ->s_umount mutex, because the filesystem
4338                  * should guarantee the delalloc inodes list be empty after
4339                  * the filesystem is readonly(all dirty pages are written to
4340                  * the disk).
4341                  */
4342                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4343                 if (!current->journal_info)
4344                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4345         }
4346 }
4347
4348 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4349 {
4350         u64 bytes;
4351         int nr;
4352
4353         bytes = btrfs_calc_trans_metadata_size(root, 1);
4354         nr = (int)div64_u64(to_reclaim, bytes);
4355         if (!nr)
4356                 nr = 1;
4357         return nr;
4358 }
4359
4360 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4361
4362 /*
4363  * shrink metadata reservation for delalloc
4364  */
4365 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4366                             bool wait_ordered)
4367 {
4368         struct btrfs_block_rsv *block_rsv;
4369         struct btrfs_space_info *space_info;
4370         struct btrfs_trans_handle *trans;
4371         u64 delalloc_bytes;
4372         u64 max_reclaim;
4373         long time_left;
4374         unsigned long nr_pages;
4375         int loops;
4376         int items;
4377         enum btrfs_reserve_flush_enum flush;
4378
4379         /* Calc the number of the pages we need flush for space reservation */
4380         items = calc_reclaim_items_nr(root, to_reclaim);
4381         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4382
4383         trans = (struct btrfs_trans_handle *)current->journal_info;
4384         block_rsv = &root->fs_info->delalloc_block_rsv;
4385         space_info = block_rsv->space_info;
4386
4387         delalloc_bytes = percpu_counter_sum_positive(
4388                                                 &root->fs_info->delalloc_bytes);
4389         if (delalloc_bytes == 0) {
4390                 if (trans)
4391                         return;
4392                 if (wait_ordered)
4393                         btrfs_wait_ordered_roots(root->fs_info, items);
4394                 return;
4395         }
4396
4397         loops = 0;
4398         while (delalloc_bytes && loops < 3) {
4399                 max_reclaim = min(delalloc_bytes, to_reclaim);
4400                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4401                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4402                 /*
4403                  * We need to wait for the async pages to actually start before
4404                  * we do anything.
4405                  */
4406                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4407                 if (!max_reclaim)
4408                         goto skip_async;
4409
4410                 if (max_reclaim <= nr_pages)
4411                         max_reclaim = 0;
4412                 else
4413                         max_reclaim -= nr_pages;
4414
4415                 wait_event(root->fs_info->async_submit_wait,
4416                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4417                            (int)max_reclaim);
4418 skip_async:
4419                 if (!trans)
4420                         flush = BTRFS_RESERVE_FLUSH_ALL;
4421                 else
4422                         flush = BTRFS_RESERVE_NO_FLUSH;
4423                 spin_lock(&space_info->lock);
4424                 if (can_overcommit(root, space_info, orig, flush)) {
4425                         spin_unlock(&space_info->lock);
4426                         break;
4427                 }
4428                 spin_unlock(&space_info->lock);
4429
4430                 loops++;
4431                 if (wait_ordered && !trans) {
4432                         btrfs_wait_ordered_roots(root->fs_info, items);
4433                 } else {
4434                         time_left = schedule_timeout_killable(1);
4435                         if (time_left)
4436                                 break;
4437                 }
4438                 delalloc_bytes = percpu_counter_sum_positive(
4439                                                 &root->fs_info->delalloc_bytes);
4440         }
4441 }
4442
4443 /**
4444  * maybe_commit_transaction - possibly commit the transaction if its ok to
4445  * @root - the root we're allocating for
4446  * @bytes - the number of bytes we want to reserve
4447  * @force - force the commit
4448  *
4449  * This will check to make sure that committing the transaction will actually
4450  * get us somewhere and then commit the transaction if it does.  Otherwise it
4451  * will return -ENOSPC.
4452  */
4453 static int may_commit_transaction(struct btrfs_root *root,
4454                                   struct btrfs_space_info *space_info,
4455                                   u64 bytes, int force)
4456 {
4457         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4458         struct btrfs_trans_handle *trans;
4459
4460         trans = (struct btrfs_trans_handle *)current->journal_info;
4461         if (trans)
4462                 return -EAGAIN;
4463
4464         if (force)
4465                 goto commit;
4466
4467         /* See if there is enough pinned space to make this reservation */
4468         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4469                                    bytes) >= 0)
4470                 goto commit;
4471
4472         /*
4473          * See if there is some space in the delayed insertion reservation for
4474          * this reservation.
4475          */
4476         if (space_info != delayed_rsv->space_info)
4477                 return -ENOSPC;
4478
4479         spin_lock(&delayed_rsv->lock);
4480         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4481                                    bytes - delayed_rsv->size) >= 0) {
4482                 spin_unlock(&delayed_rsv->lock);
4483                 return -ENOSPC;
4484         }
4485         spin_unlock(&delayed_rsv->lock);
4486
4487 commit:
4488         trans = btrfs_join_transaction(root);
4489         if (IS_ERR(trans))
4490                 return -ENOSPC;
4491
4492         return btrfs_commit_transaction(trans, root);
4493 }
4494
4495 enum flush_state {
4496         FLUSH_DELAYED_ITEMS_NR  =       1,
4497         FLUSH_DELAYED_ITEMS     =       2,
4498         FLUSH_DELALLOC          =       3,
4499         FLUSH_DELALLOC_WAIT     =       4,
4500         ALLOC_CHUNK             =       5,
4501         COMMIT_TRANS            =       6,
4502 };
4503
4504 static int flush_space(struct btrfs_root *root,
4505                        struct btrfs_space_info *space_info, u64 num_bytes,
4506                        u64 orig_bytes, int state)
4507 {
4508         struct btrfs_trans_handle *trans;
4509         int nr;
4510         int ret = 0;
4511
4512         switch (state) {
4513         case FLUSH_DELAYED_ITEMS_NR:
4514         case FLUSH_DELAYED_ITEMS:
4515                 if (state == FLUSH_DELAYED_ITEMS_NR)
4516                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4517                 else
4518                         nr = -1;
4519
4520                 trans = btrfs_join_transaction(root);
4521                 if (IS_ERR(trans)) {
4522                         ret = PTR_ERR(trans);
4523                         break;
4524                 }
4525                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4526                 btrfs_end_transaction(trans, root);
4527                 break;
4528         case FLUSH_DELALLOC:
4529         case FLUSH_DELALLOC_WAIT:
4530                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4531                                 state == FLUSH_DELALLOC_WAIT);
4532                 break;
4533         case ALLOC_CHUNK:
4534                 trans = btrfs_join_transaction(root);
4535                 if (IS_ERR(trans)) {
4536                         ret = PTR_ERR(trans);
4537                         break;
4538                 }
4539                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4540                                      btrfs_get_alloc_profile(root, 0),
4541                                      CHUNK_ALLOC_NO_FORCE);
4542                 btrfs_end_transaction(trans, root);
4543                 if (ret == -ENOSPC)
4544                         ret = 0;
4545                 break;
4546         case COMMIT_TRANS:
4547                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4548                 break;
4549         default:
4550                 ret = -ENOSPC;
4551                 break;
4552         }
4553
4554         return ret;
4555 }
4556
4557 static inline u64
4558 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4559                                  struct btrfs_space_info *space_info)
4560 {
4561         u64 used;
4562         u64 expected;
4563         u64 to_reclaim;
4564
4565         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4566                                 16 * 1024 * 1024);
4567         spin_lock(&space_info->lock);
4568         if (can_overcommit(root, space_info, to_reclaim,
4569                            BTRFS_RESERVE_FLUSH_ALL)) {
4570                 to_reclaim = 0;
4571                 goto out;
4572         }
4573
4574         used = space_info->bytes_used + space_info->bytes_reserved +
4575                space_info->bytes_pinned + space_info->bytes_readonly +
4576                space_info->bytes_may_use;
4577         if (can_overcommit(root, space_info, 1024 * 1024,
4578                            BTRFS_RESERVE_FLUSH_ALL))
4579                 expected = div_factor_fine(space_info->total_bytes, 95);
4580         else
4581                 expected = div_factor_fine(space_info->total_bytes, 90);
4582
4583         if (used > expected)
4584                 to_reclaim = used - expected;
4585         else
4586                 to_reclaim = 0;
4587         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4588                                      space_info->bytes_reserved);
4589 out:
4590         spin_unlock(&space_info->lock);
4591
4592         return to_reclaim;
4593 }
4594
4595 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4596                                         struct btrfs_fs_info *fs_info, u64 used)
4597 {
4598         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4599
4600         /* If we're just plain full then async reclaim just slows us down. */
4601         if (space_info->bytes_used >= thresh)
4602                 return 0;
4603
4604         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4605                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4606 }
4607
4608 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4609                                        struct btrfs_fs_info *fs_info,
4610                                        int flush_state)
4611 {
4612         u64 used;
4613
4614         spin_lock(&space_info->lock);
4615         /*
4616          * We run out of space and have not got any free space via flush_space,
4617          * so don't bother doing async reclaim.
4618          */
4619         if (flush_state > COMMIT_TRANS && space_info->full) {
4620                 spin_unlock(&space_info->lock);
4621                 return 0;
4622         }
4623
4624         used = space_info->bytes_used + space_info->bytes_reserved +
4625                space_info->bytes_pinned + space_info->bytes_readonly +
4626                space_info->bytes_may_use;
4627         if (need_do_async_reclaim(space_info, fs_info, used)) {
4628                 spin_unlock(&space_info->lock);
4629                 return 1;
4630         }
4631         spin_unlock(&space_info->lock);
4632
4633         return 0;
4634 }
4635
4636 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4637 {
4638         struct btrfs_fs_info *fs_info;
4639         struct btrfs_space_info *space_info;
4640         u64 to_reclaim;
4641         int flush_state;
4642
4643         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4644         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4645
4646         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4647                                                       space_info);
4648         if (!to_reclaim)
4649                 return;
4650
4651         flush_state = FLUSH_DELAYED_ITEMS_NR;
4652         do {
4653                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4654                             to_reclaim, flush_state);
4655                 flush_state++;
4656                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4657                                                  flush_state))
4658                         return;
4659         } while (flush_state < COMMIT_TRANS);
4660 }
4661
4662 void btrfs_init_async_reclaim_work(struct work_struct *work)
4663 {
4664         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4665 }
4666
4667 /**
4668  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4669  * @root - the root we're allocating for
4670  * @block_rsv - the block_rsv we're allocating for
4671  * @orig_bytes - the number of bytes we want
4672  * @flush - whether or not we can flush to make our reservation
4673  *
4674  * This will reserve orgi_bytes number of bytes from the space info associated
4675  * with the block_rsv.  If there is not enough space it will make an attempt to
4676  * flush out space to make room.  It will do this by flushing delalloc if
4677  * possible or committing the transaction.  If flush is 0 then no attempts to
4678  * regain reservations will be made and this will fail if there is not enough
4679  * space already.
4680  */
4681 static int reserve_metadata_bytes(struct btrfs_root *root,
4682                                   struct btrfs_block_rsv *block_rsv,
4683                                   u64 orig_bytes,
4684                                   enum btrfs_reserve_flush_enum flush)
4685 {
4686         struct btrfs_space_info *space_info = block_rsv->space_info;
4687         u64 used;
4688         u64 num_bytes = orig_bytes;
4689         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4690         int ret = 0;
4691         bool flushing = false;
4692
4693 again:
4694         ret = 0;
4695         spin_lock(&space_info->lock);
4696         /*
4697          * We only want to wait if somebody other than us is flushing and we
4698          * are actually allowed to flush all things.
4699          */
4700         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4701                space_info->flush) {
4702                 spin_unlock(&space_info->lock);
4703                 /*
4704                  * If we have a trans handle we can't wait because the flusher
4705                  * may have to commit the transaction, which would mean we would
4706                  * deadlock since we are waiting for the flusher to finish, but
4707                  * hold the current transaction open.
4708                  */
4709                 if (current->journal_info)
4710                         return -EAGAIN;
4711                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4712                 /* Must have been killed, return */
4713                 if (ret)
4714                         return -EINTR;
4715
4716                 spin_lock(&space_info->lock);
4717         }
4718
4719         ret = -ENOSPC;
4720         used = space_info->bytes_used + space_info->bytes_reserved +
4721                 space_info->bytes_pinned + space_info->bytes_readonly +
4722                 space_info->bytes_may_use;
4723
4724         /*
4725          * The idea here is that we've not already over-reserved the block group
4726          * then we can go ahead and save our reservation first and then start
4727          * flushing if we need to.  Otherwise if we've already overcommitted
4728          * lets start flushing stuff first and then come back and try to make
4729          * our reservation.
4730          */
4731         if (used <= space_info->total_bytes) {
4732                 if (used + orig_bytes <= space_info->total_bytes) {
4733                         space_info->bytes_may_use += orig_bytes;
4734                         trace_btrfs_space_reservation(root->fs_info,
4735                                 "space_info", space_info->flags, orig_bytes, 1);
4736                         ret = 0;
4737                 } else {
4738                         /*
4739                          * Ok set num_bytes to orig_bytes since we aren't
4740                          * overocmmitted, this way we only try and reclaim what
4741                          * we need.
4742                          */
4743                         num_bytes = orig_bytes;
4744                 }
4745         } else {
4746                 /*
4747                  * Ok we're over committed, set num_bytes to the overcommitted
4748                  * amount plus the amount of bytes that we need for this
4749                  * reservation.
4750                  */
4751                 num_bytes = used - space_info->total_bytes +
4752                         (orig_bytes * 2);
4753         }
4754
4755         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4756                 space_info->bytes_may_use += orig_bytes;
4757                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4758                                               space_info->flags, orig_bytes,
4759                                               1);
4760                 ret = 0;
4761         }
4762
4763         /*
4764          * Couldn't make our reservation, save our place so while we're trying
4765          * to reclaim space we can actually use it instead of somebody else
4766          * stealing it from us.
4767          *
4768          * We make the other tasks wait for the flush only when we can flush
4769          * all things.
4770          */
4771         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4772                 flushing = true;
4773                 space_info->flush = 1;
4774         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4775                 used += orig_bytes;
4776                 /*
4777                  * We will do the space reservation dance during log replay,
4778                  * which means we won't have fs_info->fs_root set, so don't do
4779                  * the async reclaim as we will panic.
4780                  */
4781                 if (!root->fs_info->log_root_recovering &&
4782                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4783                     !work_busy(&root->fs_info->async_reclaim_work))
4784                         queue_work(system_unbound_wq,
4785                                    &root->fs_info->async_reclaim_work);
4786         }
4787         spin_unlock(&space_info->lock);
4788
4789         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4790                 goto out;
4791
4792         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4793                           flush_state);
4794         flush_state++;
4795
4796         /*
4797          * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4798          * would happen. So skip delalloc flush.
4799          */
4800         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4801             (flush_state == FLUSH_DELALLOC ||
4802              flush_state == FLUSH_DELALLOC_WAIT))
4803                 flush_state = ALLOC_CHUNK;
4804
4805         if (!ret)
4806                 goto again;
4807         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4808                  flush_state < COMMIT_TRANS)
4809                 goto again;
4810         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4811                  flush_state <= COMMIT_TRANS)
4812                 goto again;
4813
4814 out:
4815         if (ret == -ENOSPC &&
4816             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4817                 struct btrfs_block_rsv *global_rsv =
4818                         &root->fs_info->global_block_rsv;
4819
4820                 if (block_rsv != global_rsv &&
4821                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4822                         ret = 0;
4823         }
4824         if (ret == -ENOSPC)
4825                 trace_btrfs_space_reservation(root->fs_info,
4826                                               "space_info:enospc",
4827                                               space_info->flags, orig_bytes, 1);
4828         if (flushing) {
4829                 spin_lock(&space_info->lock);
4830                 space_info->flush = 0;
4831                 wake_up_all(&space_info->wait);
4832                 spin_unlock(&space_info->lock);
4833         }
4834         return ret;
4835 }
4836
4837 static struct btrfs_block_rsv *get_block_rsv(
4838                                         const struct btrfs_trans_handle *trans,
4839                                         const struct btrfs_root *root)
4840 {
4841         struct btrfs_block_rsv *block_rsv = NULL;
4842
4843         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4844                 block_rsv = trans->block_rsv;
4845
4846         if (root == root->fs_info->csum_root && trans->adding_csums)
4847                 block_rsv = trans->block_rsv;
4848
4849         if (root == root->fs_info->uuid_root)
4850                 block_rsv = trans->block_rsv;
4851
4852         if (!block_rsv)
4853                 block_rsv = root->block_rsv;
4854
4855         if (!block_rsv)
4856                 block_rsv = &root->fs_info->empty_block_rsv;
4857
4858         return block_rsv;
4859 }
4860
4861 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4862                                u64 num_bytes)
4863 {
4864         int ret = -ENOSPC;
4865         spin_lock(&block_rsv->lock);
4866         if (block_rsv->reserved >= num_bytes) {
4867                 block_rsv->reserved -= num_bytes;
4868                 if (block_rsv->reserved < block_rsv->size)
4869                         block_rsv->full = 0;
4870                 ret = 0;
4871         }
4872         spin_unlock(&block_rsv->lock);
4873         return ret;
4874 }
4875
4876 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4877                                 u64 num_bytes, int update_size)
4878 {
4879         spin_lock(&block_rsv->lock);
4880         block_rsv->reserved += num_bytes;
4881         if (update_size)
4882                 block_rsv->size += num_bytes;
4883         else if (block_rsv->reserved >= block_rsv->size)
4884                 block_rsv->full = 1;
4885         spin_unlock(&block_rsv->lock);
4886 }
4887
4888 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4889                              struct btrfs_block_rsv *dest, u64 num_bytes,
4890                              int min_factor)
4891 {
4892         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4893         u64 min_bytes;
4894
4895         if (global_rsv->space_info != dest->space_info)
4896                 return -ENOSPC;
4897
4898         spin_lock(&global_rsv->lock);
4899         min_bytes = div_factor(global_rsv->size, min_factor);
4900         if (global_rsv->reserved < min_bytes + num_bytes) {
4901                 spin_unlock(&global_rsv->lock);
4902                 return -ENOSPC;
4903         }
4904         global_rsv->reserved -= num_bytes;
4905         if (global_rsv->reserved < global_rsv->size)
4906                 global_rsv->full = 0;
4907         spin_unlock(&global_rsv->lock);
4908
4909         block_rsv_add_bytes(dest, num_bytes, 1);
4910         return 0;
4911 }
4912
4913 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4914                                     struct btrfs_block_rsv *block_rsv,
4915                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4916 {
4917         struct btrfs_space_info *space_info = block_rsv->space_info;
4918
4919         spin_lock(&block_rsv->lock);
4920         if (num_bytes == (u64)-1)
4921                 num_bytes = block_rsv->size;
4922         block_rsv->size -= num_bytes;
4923         if (block_rsv->reserved >= block_rsv->size) {
4924                 num_bytes = block_rsv->reserved - block_rsv->size;
4925                 block_rsv->reserved = block_rsv->size;
4926                 block_rsv->full = 1;
4927         } else {
4928                 num_bytes = 0;
4929         }
4930         spin_unlock(&block_rsv->lock);
4931
4932         if (num_bytes > 0) {
4933                 if (dest) {
4934                         spin_lock(&dest->lock);
4935                         if (!dest->full) {
4936                                 u64 bytes_to_add;
4937
4938                                 bytes_to_add = dest->size - dest->reserved;
4939                                 bytes_to_add = min(num_bytes, bytes_to_add);
4940                                 dest->reserved += bytes_to_add;
4941                                 if (dest->reserved >= dest->size)
4942                                         dest->full = 1;
4943                                 num_bytes -= bytes_to_add;
4944                         }
4945                         spin_unlock(&dest->lock);
4946                 }
4947                 if (num_bytes) {
4948                         spin_lock(&space_info->lock);
4949                         space_info->bytes_may_use -= num_bytes;
4950                         trace_btrfs_space_reservation(fs_info, "space_info",
4951                                         space_info->flags, num_bytes, 0);
4952                         spin_unlock(&space_info->lock);
4953                 }
4954         }
4955 }
4956
4957 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4958                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4959 {
4960         int ret;
4961
4962         ret = block_rsv_use_bytes(src, num_bytes);
4963         if (ret)
4964                 return ret;
4965
4966         block_rsv_add_bytes(dst, num_bytes, 1);
4967         return 0;
4968 }
4969
4970 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4971 {
4972         memset(rsv, 0, sizeof(*rsv));
4973         spin_lock_init(&rsv->lock);
4974         rsv->type = type;
4975 }
4976
4977 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4978                                               unsigned short type)
4979 {
4980         struct btrfs_block_rsv *block_rsv;
4981         struct btrfs_fs_info *fs_info = root->fs_info;
4982
4983         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4984         if (!block_rsv)
4985                 return NULL;
4986
4987         btrfs_init_block_rsv(block_rsv, type);
4988         block_rsv->space_info = __find_space_info(fs_info,
4989                                                   BTRFS_BLOCK_GROUP_METADATA);
4990         return block_rsv;
4991 }
4992
4993 void btrfs_free_block_rsv(struct btrfs_root *root,
4994                           struct btrfs_block_rsv *rsv)
4995 {
4996         if (!rsv)
4997                 return;
4998         btrfs_block_rsv_release(root, rsv, (u64)-1);
4999         kfree(rsv);
5000 }
5001
5002 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5003 {
5004         kfree(rsv);
5005 }
5006
5007 int btrfs_block_rsv_add(struct btrfs_root *root,
5008                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5009                         enum btrfs_reserve_flush_enum flush)
5010 {
5011         int ret;
5012
5013         if (num_bytes == 0)
5014                 return 0;
5015
5016         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5017         if (!ret) {
5018                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5019                 return 0;
5020         }
5021
5022         return ret;
5023 }
5024
5025 int btrfs_block_rsv_check(struct btrfs_root *root,
5026                           struct btrfs_block_rsv *block_rsv, int min_factor)
5027 {
5028         u64 num_bytes = 0;
5029         int ret = -ENOSPC;
5030
5031         if (!block_rsv)
5032                 return 0;
5033
5034         spin_lock(&block_rsv->lock);
5035         num_bytes = div_factor(block_rsv->size, min_factor);
5036         if (block_rsv->reserved >= num_bytes)
5037                 ret = 0;
5038         spin_unlock(&block_rsv->lock);
5039
5040         return ret;
5041 }
5042
5043 int btrfs_block_rsv_refill(struct btrfs_root *root,
5044                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5045                            enum btrfs_reserve_flush_enum flush)
5046 {
5047         u64 num_bytes = 0;
5048         int ret = -ENOSPC;
5049
5050         if (!block_rsv)
5051                 return 0;
5052
5053         spin_lock(&block_rsv->lock);
5054         num_bytes = min_reserved;
5055         if (block_rsv->reserved >= num_bytes)
5056                 ret = 0;
5057         else
5058                 num_bytes -= block_rsv->reserved;
5059         spin_unlock(&block_rsv->lock);
5060
5061         if (!ret)
5062                 return 0;
5063
5064         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5065         if (!ret) {
5066                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5067                 return 0;
5068         }
5069
5070         return ret;
5071 }
5072
5073 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5074                             struct btrfs_block_rsv *dst_rsv,
5075                             u64 num_bytes)
5076 {
5077         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5078 }
5079
5080 void btrfs_block_rsv_release(struct btrfs_root *root,
5081                              struct btrfs_block_rsv *block_rsv,
5082                              u64 num_bytes)
5083 {
5084         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5085         if (global_rsv == block_rsv ||
5086             block_rsv->space_info != global_rsv->space_info)
5087                 global_rsv = NULL;
5088         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5089                                 num_bytes);
5090 }
5091
5092 /*
5093  * helper to calculate size of global block reservation.
5094  * the desired value is sum of space used by extent tree,
5095  * checksum tree and root tree
5096  */
5097 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5098 {
5099         struct btrfs_space_info *sinfo;
5100         u64 num_bytes;
5101         u64 meta_used;
5102         u64 data_used;
5103         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5104
5105         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5106         spin_lock(&sinfo->lock);
5107         data_used = sinfo->bytes_used;
5108         spin_unlock(&sinfo->lock);
5109
5110         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5111         spin_lock(&sinfo->lock);
5112         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5113                 data_used = 0;
5114         meta_used = sinfo->bytes_used;
5115         spin_unlock(&sinfo->lock);
5116
5117         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5118                     csum_size * 2;
5119         num_bytes += div_u64(data_used + meta_used, 50);
5120
5121         if (num_bytes * 3 > meta_used)
5122                 num_bytes = div_u64(meta_used, 3);
5123
5124         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5125 }
5126
5127 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5128 {
5129         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5130         struct btrfs_space_info *sinfo = block_rsv->space_info;
5131         u64 num_bytes;
5132
5133         num_bytes = calc_global_metadata_size(fs_info);
5134
5135         spin_lock(&sinfo->lock);
5136         spin_lock(&block_rsv->lock);
5137
5138         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5139
5140         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5141                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5142                     sinfo->bytes_may_use;
5143
5144         if (sinfo->total_bytes > num_bytes) {
5145                 num_bytes = sinfo->total_bytes - num_bytes;
5146                 block_rsv->reserved += num_bytes;
5147                 sinfo->bytes_may_use += num_bytes;
5148                 trace_btrfs_space_reservation(fs_info, "space_info",
5149                                       sinfo->flags, num_bytes, 1);
5150         }
5151
5152         if (block_rsv->reserved >= block_rsv->size) {
5153                 num_bytes = block_rsv->reserved - block_rsv->size;
5154                 sinfo->bytes_may_use -= num_bytes;
5155                 trace_btrfs_space_reservation(fs_info, "space_info",
5156                                       sinfo->flags, num_bytes, 0);
5157                 block_rsv->reserved = block_rsv->size;
5158                 block_rsv->full = 1;
5159         }
5160
5161         spin_unlock(&block_rsv->lock);
5162         spin_unlock(&sinfo->lock);
5163 }
5164
5165 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5166 {
5167         struct btrfs_space_info *space_info;
5168
5169         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5170         fs_info->chunk_block_rsv.space_info = space_info;
5171
5172         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5173         fs_info->global_block_rsv.space_info = space_info;
5174         fs_info->delalloc_block_rsv.space_info = space_info;
5175         fs_info->trans_block_rsv.space_info = space_info;
5176         fs_info->empty_block_rsv.space_info = space_info;
5177         fs_info->delayed_block_rsv.space_info = space_info;
5178
5179         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5180         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5181         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5182         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5183         if (fs_info->quota_root)
5184                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5185         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5186
5187         update_global_block_rsv(fs_info);
5188 }
5189
5190 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5191 {
5192         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5193                                 (u64)-1);
5194         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5195         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5196         WARN_ON(fs_info->trans_block_rsv.size > 0);
5197         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5198         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5199         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5200         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5201         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5202 }
5203
5204 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5205                                   struct btrfs_root *root)
5206 {
5207         if (!trans->block_rsv)
5208                 return;
5209
5210         if (!trans->bytes_reserved)
5211                 return;
5212
5213         trace_btrfs_space_reservation(root->fs_info, "transaction",
5214                                       trans->transid, trans->bytes_reserved, 0);
5215         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5216         trans->bytes_reserved = 0;
5217 }
5218
5219 /*
5220  * To be called after all the new block groups attached to the transaction
5221  * handle have been created (btrfs_create_pending_block_groups()).
5222  */
5223 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5224 {
5225         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5226
5227         if (!trans->chunk_bytes_reserved)
5228                 return;
5229
5230         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5231
5232         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5233                                 trans->chunk_bytes_reserved);
5234         trans->chunk_bytes_reserved = 0;
5235 }
5236
5237 /* Can only return 0 or -ENOSPC */
5238 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5239                                   struct inode *inode)
5240 {
5241         struct btrfs_root *root = BTRFS_I(inode)->root;
5242         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5243         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5244
5245         /*
5246          * We need to hold space in order to delete our orphan item once we've
5247          * added it, so this takes the reservation so we can release it later
5248          * when we are truly done with the orphan item.
5249          */
5250         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5251         trace_btrfs_space_reservation(root->fs_info, "orphan",
5252                                       btrfs_ino(inode), num_bytes, 1);
5253         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5254 }
5255
5256 void btrfs_orphan_release_metadata(struct inode *inode)
5257 {
5258         struct btrfs_root *root = BTRFS_I(inode)->root;
5259         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5260         trace_btrfs_space_reservation(root->fs_info, "orphan",
5261                                       btrfs_ino(inode), num_bytes, 0);
5262         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5263 }
5264
5265 /*
5266  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5267  * root: the root of the parent directory
5268  * rsv: block reservation
5269  * items: the number of items that we need do reservation
5270  * qgroup_reserved: used to return the reserved size in qgroup
5271  *
5272  * This function is used to reserve the space for snapshot/subvolume
5273  * creation and deletion. Those operations are different with the
5274  * common file/directory operations, they change two fs/file trees
5275  * and root tree, the number of items that the qgroup reserves is
5276  * different with the free space reservation. So we can not use
5277  * the space reseravtion mechanism in start_transaction().
5278  */
5279 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5280                                      struct btrfs_block_rsv *rsv,
5281                                      int items,
5282                                      u64 *qgroup_reserved,
5283                                      bool use_global_rsv)
5284 {
5285         u64 num_bytes;
5286         int ret;
5287         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5288
5289         if (root->fs_info->quota_enabled) {
5290                 /* One for parent inode, two for dir entries */
5291                 num_bytes = 3 * root->nodesize;
5292                 ret = btrfs_qgroup_reserve(root, num_bytes);
5293                 if (ret)
5294                         return ret;
5295         } else {
5296                 num_bytes = 0;
5297         }
5298
5299         *qgroup_reserved = num_bytes;
5300
5301         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5302         rsv->space_info = __find_space_info(root->fs_info,
5303                                             BTRFS_BLOCK_GROUP_METADATA);
5304         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5305                                   BTRFS_RESERVE_FLUSH_ALL);
5306
5307         if (ret == -ENOSPC && use_global_rsv)
5308                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5309
5310         if (ret) {
5311                 if (*qgroup_reserved)
5312                         btrfs_qgroup_free(root, *qgroup_reserved);
5313         }
5314
5315         return ret;
5316 }
5317
5318 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5319                                       struct btrfs_block_rsv *rsv,
5320                                       u64 qgroup_reserved)
5321 {
5322         btrfs_block_rsv_release(root, rsv, (u64)-1);
5323 }
5324
5325 /**
5326  * drop_outstanding_extent - drop an outstanding extent
5327  * @inode: the inode we're dropping the extent for
5328  * @num_bytes: the number of bytes we're relaseing.
5329  *
5330  * This is called when we are freeing up an outstanding extent, either called
5331  * after an error or after an extent is written.  This will return the number of
5332  * reserved extents that need to be freed.  This must be called with
5333  * BTRFS_I(inode)->lock held.
5334  */
5335 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5336 {
5337         unsigned drop_inode_space = 0;
5338         unsigned dropped_extents = 0;
5339         unsigned num_extents = 0;
5340
5341         num_extents = (unsigned)div64_u64(num_bytes +
5342                                           BTRFS_MAX_EXTENT_SIZE - 1,
5343                                           BTRFS_MAX_EXTENT_SIZE);
5344         ASSERT(num_extents);
5345         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5346         BTRFS_I(inode)->outstanding_extents -= num_extents;
5347
5348         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5349             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5350                                &BTRFS_I(inode)->runtime_flags))
5351                 drop_inode_space = 1;
5352
5353         /*
5354          * If we have more or the same amount of outsanding extents than we have
5355          * reserved then we need to leave the reserved extents count alone.
5356          */
5357         if (BTRFS_I(inode)->outstanding_extents >=
5358             BTRFS_I(inode)->reserved_extents)
5359                 return drop_inode_space;
5360
5361         dropped_extents = BTRFS_I(inode)->reserved_extents -
5362                 BTRFS_I(inode)->outstanding_extents;
5363         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5364         return dropped_extents + drop_inode_space;
5365 }
5366
5367 /**
5368  * calc_csum_metadata_size - return the amount of metada space that must be
5369  *      reserved/free'd for the given bytes.
5370  * @inode: the inode we're manipulating
5371  * @num_bytes: the number of bytes in question
5372  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5373  *
5374  * This adjusts the number of csum_bytes in the inode and then returns the
5375  * correct amount of metadata that must either be reserved or freed.  We
5376  * calculate how many checksums we can fit into one leaf and then divide the
5377  * number of bytes that will need to be checksumed by this value to figure out
5378  * how many checksums will be required.  If we are adding bytes then the number
5379  * may go up and we will return the number of additional bytes that must be
5380  * reserved.  If it is going down we will return the number of bytes that must
5381  * be freed.
5382  *
5383  * This must be called with BTRFS_I(inode)->lock held.
5384  */
5385 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5386                                    int reserve)
5387 {
5388         struct btrfs_root *root = BTRFS_I(inode)->root;
5389         u64 old_csums, num_csums;
5390
5391         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5392             BTRFS_I(inode)->csum_bytes == 0)
5393                 return 0;
5394
5395         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5396         if (reserve)
5397                 BTRFS_I(inode)->csum_bytes += num_bytes;
5398         else
5399                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5400         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5401
5402         /* No change, no need to reserve more */
5403         if (old_csums == num_csums)
5404                 return 0;
5405
5406         if (reserve)
5407                 return btrfs_calc_trans_metadata_size(root,
5408                                                       num_csums - old_csums);
5409
5410         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5411 }
5412
5413 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5414 {
5415         struct btrfs_root *root = BTRFS_I(inode)->root;
5416         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5417         u64 to_reserve = 0;
5418         u64 csum_bytes;
5419         unsigned nr_extents = 0;
5420         int extra_reserve = 0;
5421         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5422         int ret = 0;
5423         bool delalloc_lock = true;
5424         u64 to_free = 0;
5425         unsigned dropped;
5426
5427         /* If we are a free space inode we need to not flush since we will be in
5428          * the middle of a transaction commit.  We also don't need the delalloc
5429          * mutex since we won't race with anybody.  We need this mostly to make
5430          * lockdep shut its filthy mouth.
5431          */
5432         if (btrfs_is_free_space_inode(inode)) {
5433                 flush = BTRFS_RESERVE_NO_FLUSH;
5434                 delalloc_lock = false;
5435         }
5436
5437         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5438             btrfs_transaction_in_commit(root->fs_info))
5439                 schedule_timeout(1);
5440
5441         if (delalloc_lock)
5442                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5443
5444         num_bytes = ALIGN(num_bytes, root->sectorsize);
5445
5446         spin_lock(&BTRFS_I(inode)->lock);
5447         nr_extents = (unsigned)div64_u64(num_bytes +
5448                                          BTRFS_MAX_EXTENT_SIZE - 1,
5449                                          BTRFS_MAX_EXTENT_SIZE);
5450         BTRFS_I(inode)->outstanding_extents += nr_extents;
5451         nr_extents = 0;
5452
5453         if (BTRFS_I(inode)->outstanding_extents >
5454             BTRFS_I(inode)->reserved_extents)
5455                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5456                         BTRFS_I(inode)->reserved_extents;
5457
5458         /*
5459          * Add an item to reserve for updating the inode when we complete the
5460          * delalloc io.
5461          */
5462         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5463                       &BTRFS_I(inode)->runtime_flags)) {
5464                 nr_extents++;
5465                 extra_reserve = 1;
5466         }
5467
5468         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5469         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5470         csum_bytes = BTRFS_I(inode)->csum_bytes;
5471         spin_unlock(&BTRFS_I(inode)->lock);
5472
5473         if (root->fs_info->quota_enabled) {
5474                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5475                 if (ret)
5476                         goto out_fail;
5477         }
5478
5479         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5480         if (unlikely(ret)) {
5481                 if (root->fs_info->quota_enabled)
5482                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5483                 goto out_fail;
5484         }
5485
5486         spin_lock(&BTRFS_I(inode)->lock);
5487         if (extra_reserve) {
5488                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5489                         &BTRFS_I(inode)->runtime_flags);
5490                 nr_extents--;
5491         }
5492         BTRFS_I(inode)->reserved_extents += nr_extents;
5493         spin_unlock(&BTRFS_I(inode)->lock);
5494
5495         if (delalloc_lock)
5496                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5497
5498         if (to_reserve)
5499                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5500                                               btrfs_ino(inode), to_reserve, 1);
5501         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5502
5503         return 0;
5504
5505 out_fail:
5506         spin_lock(&BTRFS_I(inode)->lock);
5507         dropped = drop_outstanding_extent(inode, num_bytes);
5508         /*
5509          * If the inodes csum_bytes is the same as the original
5510          * csum_bytes then we know we haven't raced with any free()ers
5511          * so we can just reduce our inodes csum bytes and carry on.
5512          */
5513         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5514                 calc_csum_metadata_size(inode, num_bytes, 0);
5515         } else {
5516                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5517                 u64 bytes;
5518
5519                 /*
5520                  * This is tricky, but first we need to figure out how much we
5521                  * free'd from any free-ers that occured during this
5522                  * reservation, so we reset ->csum_bytes to the csum_bytes
5523                  * before we dropped our lock, and then call the free for the
5524                  * number of bytes that were freed while we were trying our
5525                  * reservation.
5526                  */
5527                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5528                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5529                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5530
5531
5532                 /*
5533                  * Now we need to see how much we would have freed had we not
5534                  * been making this reservation and our ->csum_bytes were not
5535                  * artificially inflated.
5536                  */
5537                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5538                 bytes = csum_bytes - orig_csum_bytes;
5539                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5540
5541                 /*
5542                  * Now reset ->csum_bytes to what it should be.  If bytes is
5543                  * more than to_free then we would have free'd more space had we
5544                  * not had an artificially high ->csum_bytes, so we need to free
5545                  * the remainder.  If bytes is the same or less then we don't
5546                  * need to do anything, the other free-ers did the correct
5547                  * thing.
5548                  */
5549                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5550                 if (bytes > to_free)
5551                         to_free = bytes - to_free;
5552                 else
5553                         to_free = 0;
5554         }
5555         spin_unlock(&BTRFS_I(inode)->lock);
5556         if (dropped)
5557                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5558
5559         if (to_free) {
5560                 btrfs_block_rsv_release(root, block_rsv, to_free);
5561                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5562                                               btrfs_ino(inode), to_free, 0);
5563         }
5564         if (delalloc_lock)
5565                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5566         return ret;
5567 }
5568
5569 /**
5570  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5571  * @inode: the inode to release the reservation for
5572  * @num_bytes: the number of bytes we're releasing
5573  *
5574  * This will release the metadata reservation for an inode.  This can be called
5575  * once we complete IO for a given set of bytes to release their metadata
5576  * reservations.
5577  */
5578 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5579 {
5580         struct btrfs_root *root = BTRFS_I(inode)->root;
5581         u64 to_free = 0;
5582         unsigned dropped;
5583
5584         num_bytes = ALIGN(num_bytes, root->sectorsize);
5585         spin_lock(&BTRFS_I(inode)->lock);
5586         dropped = drop_outstanding_extent(inode, num_bytes);
5587
5588         if (num_bytes)
5589                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5590         spin_unlock(&BTRFS_I(inode)->lock);
5591         if (dropped > 0)
5592                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5593
5594         if (btrfs_test_is_dummy_root(root))
5595                 return;
5596
5597         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5598                                       btrfs_ino(inode), to_free, 0);
5599
5600         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5601                                 to_free);
5602 }
5603
5604 /**
5605  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5606  * @inode: inode we're writing to
5607  * @num_bytes: the number of bytes we want to allocate
5608  *
5609  * This will do the following things
5610  *
5611  * o reserve space in the data space info for num_bytes
5612  * o reserve space in the metadata space info based on number of outstanding
5613  *   extents and how much csums will be needed
5614  * o add to the inodes ->delalloc_bytes
5615  * o add it to the fs_info's delalloc inodes list.
5616  *
5617  * This will return 0 for success and -ENOSPC if there is no space left.
5618  */
5619 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5620 {
5621         int ret;
5622
5623         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5624         if (ret)
5625                 return ret;
5626
5627         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5628         if (ret) {
5629                 btrfs_free_reserved_data_space(inode, num_bytes);
5630                 return ret;
5631         }
5632
5633         return 0;
5634 }
5635
5636 /**
5637  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5638  * @inode: inode we're releasing space for
5639  * @num_bytes: the number of bytes we want to free up
5640  *
5641  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5642  * called in the case that we don't need the metadata AND data reservations
5643  * anymore.  So if there is an error or we insert an inline extent.
5644  *
5645  * This function will release the metadata space that was not used and will
5646  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5647  * list if there are no delalloc bytes left.
5648  */
5649 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5650 {
5651         btrfs_delalloc_release_metadata(inode, num_bytes);
5652         btrfs_free_reserved_data_space(inode, num_bytes);
5653 }
5654
5655 static int update_block_group(struct btrfs_trans_handle *trans,
5656                               struct btrfs_root *root, u64 bytenr,
5657                               u64 num_bytes, int alloc)
5658 {
5659         struct btrfs_block_group_cache *cache = NULL;
5660         struct btrfs_fs_info *info = root->fs_info;
5661         u64 total = num_bytes;
5662         u64 old_val;
5663         u64 byte_in_group;
5664         int factor;
5665
5666         /* block accounting for super block */
5667         spin_lock(&info->delalloc_root_lock);
5668         old_val = btrfs_super_bytes_used(info->super_copy);
5669         if (alloc)
5670                 old_val += num_bytes;
5671         else
5672                 old_val -= num_bytes;
5673         btrfs_set_super_bytes_used(info->super_copy, old_val);
5674         spin_unlock(&info->delalloc_root_lock);
5675
5676         while (total) {
5677                 cache = btrfs_lookup_block_group(info, bytenr);
5678                 if (!cache)
5679                         return -ENOENT;
5680                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5681                                     BTRFS_BLOCK_GROUP_RAID1 |
5682                                     BTRFS_BLOCK_GROUP_RAID10))
5683                         factor = 2;
5684                 else
5685                         factor = 1;
5686                 /*
5687                  * If this block group has free space cache written out, we
5688                  * need to make sure to load it if we are removing space.  This
5689                  * is because we need the unpinning stage to actually add the
5690                  * space back to the block group, otherwise we will leak space.
5691                  */
5692                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5693                         cache_block_group(cache, 1);
5694
5695                 byte_in_group = bytenr - cache->key.objectid;
5696                 WARN_ON(byte_in_group > cache->key.offset);
5697
5698                 spin_lock(&cache->space_info->lock);
5699                 spin_lock(&cache->lock);
5700
5701                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5702                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5703                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5704
5705                 old_val = btrfs_block_group_used(&cache->item);
5706                 num_bytes = min(total, cache->key.offset - byte_in_group);
5707                 if (alloc) {
5708                         old_val += num_bytes;
5709                         btrfs_set_block_group_used(&cache->item, old_val);
5710                         cache->reserved -= num_bytes;
5711                         cache->space_info->bytes_reserved -= num_bytes;
5712                         cache->space_info->bytes_used += num_bytes;
5713                         cache->space_info->disk_used += num_bytes * factor;
5714                         spin_unlock(&cache->lock);
5715                         spin_unlock(&cache->space_info->lock);
5716                 } else {
5717                         old_val -= num_bytes;
5718                         btrfs_set_block_group_used(&cache->item, old_val);
5719                         cache->pinned += num_bytes;
5720                         cache->space_info->bytes_pinned += num_bytes;
5721                         cache->space_info->bytes_used -= num_bytes;
5722                         cache->space_info->disk_used -= num_bytes * factor;
5723                         spin_unlock(&cache->lock);
5724                         spin_unlock(&cache->space_info->lock);
5725
5726                         set_extent_dirty(info->pinned_extents,
5727                                          bytenr, bytenr + num_bytes - 1,
5728                                          GFP_NOFS | __GFP_NOFAIL);
5729                         /*
5730                          * No longer have used bytes in this block group, queue
5731                          * it for deletion.
5732                          */
5733                         if (old_val == 0) {
5734                                 spin_lock(&info->unused_bgs_lock);
5735                                 if (list_empty(&cache->bg_list)) {
5736                                         btrfs_get_block_group(cache);
5737                                         list_add_tail(&cache->bg_list,
5738                                                       &info->unused_bgs);
5739                                 }
5740                                 spin_unlock(&info->unused_bgs_lock);
5741                         }
5742                 }
5743
5744                 spin_lock(&trans->transaction->dirty_bgs_lock);
5745                 if (list_empty(&cache->dirty_list)) {
5746                         list_add_tail(&cache->dirty_list,
5747                                       &trans->transaction->dirty_bgs);
5748                                 trans->transaction->num_dirty_bgs++;
5749                         btrfs_get_block_group(cache);
5750                 }
5751                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5752
5753                 btrfs_put_block_group(cache);
5754                 total -= num_bytes;
5755                 bytenr += num_bytes;
5756         }
5757         return 0;
5758 }
5759
5760 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5761 {
5762         struct btrfs_block_group_cache *cache;
5763         u64 bytenr;
5764
5765         spin_lock(&root->fs_info->block_group_cache_lock);
5766         bytenr = root->fs_info->first_logical_byte;
5767         spin_unlock(&root->fs_info->block_group_cache_lock);
5768
5769         if (bytenr < (u64)-1)
5770                 return bytenr;
5771
5772         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5773         if (!cache)
5774                 return 0;
5775
5776         bytenr = cache->key.objectid;
5777         btrfs_put_block_group(cache);
5778
5779         return bytenr;
5780 }
5781
5782 static int pin_down_extent(struct btrfs_root *root,
5783                            struct btrfs_block_group_cache *cache,
5784                            u64 bytenr, u64 num_bytes, int reserved)
5785 {
5786         spin_lock(&cache->space_info->lock);
5787         spin_lock(&cache->lock);
5788         cache->pinned += num_bytes;
5789         cache->space_info->bytes_pinned += num_bytes;
5790         if (reserved) {
5791                 cache->reserved -= num_bytes;
5792                 cache->space_info->bytes_reserved -= num_bytes;
5793         }
5794         spin_unlock(&cache->lock);
5795         spin_unlock(&cache->space_info->lock);
5796
5797         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5798                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5799         if (reserved)
5800                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5801         return 0;
5802 }
5803
5804 /*
5805  * this function must be called within transaction
5806  */
5807 int btrfs_pin_extent(struct btrfs_root *root,
5808                      u64 bytenr, u64 num_bytes, int reserved)
5809 {
5810         struct btrfs_block_group_cache *cache;
5811
5812         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5813         BUG_ON(!cache); /* Logic error */
5814
5815         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5816
5817         btrfs_put_block_group(cache);
5818         return 0;
5819 }
5820
5821 /*
5822  * this function must be called within transaction
5823  */
5824 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5825                                     u64 bytenr, u64 num_bytes)
5826 {
5827         struct btrfs_block_group_cache *cache;
5828         int ret;
5829
5830         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5831         if (!cache)
5832                 return -EINVAL;
5833
5834         /*
5835          * pull in the free space cache (if any) so that our pin
5836          * removes the free space from the cache.  We have load_only set
5837          * to one because the slow code to read in the free extents does check
5838          * the pinned extents.
5839          */
5840         cache_block_group(cache, 1);
5841
5842         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5843
5844         /* remove us from the free space cache (if we're there at all) */
5845         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5846         btrfs_put_block_group(cache);
5847         return ret;
5848 }
5849
5850 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5851 {
5852         int ret;
5853         struct btrfs_block_group_cache *block_group;
5854         struct btrfs_caching_control *caching_ctl;
5855
5856         block_group = btrfs_lookup_block_group(root->fs_info, start);
5857         if (!block_group)
5858                 return -EINVAL;
5859
5860         cache_block_group(block_group, 0);
5861         caching_ctl = get_caching_control(block_group);
5862
5863         if (!caching_ctl) {
5864                 /* Logic error */
5865                 BUG_ON(!block_group_cache_done(block_group));
5866                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5867         } else {
5868                 mutex_lock(&caching_ctl->mutex);
5869
5870                 if (start >= caching_ctl->progress) {
5871                         ret = add_excluded_extent(root, start, num_bytes);
5872                 } else if (start + num_bytes <= caching_ctl->progress) {
5873                         ret = btrfs_remove_free_space(block_group,
5874                                                       start, num_bytes);
5875                 } else {
5876                         num_bytes = caching_ctl->progress - start;
5877                         ret = btrfs_remove_free_space(block_group,
5878                                                       start, num_bytes);
5879                         if (ret)
5880                                 goto out_lock;
5881
5882                         num_bytes = (start + num_bytes) -
5883                                 caching_ctl->progress;
5884                         start = caching_ctl->progress;
5885                         ret = add_excluded_extent(root, start, num_bytes);
5886                 }
5887 out_lock:
5888                 mutex_unlock(&caching_ctl->mutex);
5889                 put_caching_control(caching_ctl);
5890         }
5891         btrfs_put_block_group(block_group);
5892         return ret;
5893 }
5894
5895 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5896                                  struct extent_buffer *eb)
5897 {
5898         struct btrfs_file_extent_item *item;
5899         struct btrfs_key key;
5900         int found_type;
5901         int i;
5902
5903         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5904                 return 0;
5905
5906         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5907                 btrfs_item_key_to_cpu(eb, &key, i);
5908                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5909                         continue;
5910                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5911                 found_type = btrfs_file_extent_type(eb, item);
5912                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5913                         continue;
5914                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5915                         continue;
5916                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5917                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5918                 __exclude_logged_extent(log, key.objectid, key.offset);
5919         }
5920
5921         return 0;
5922 }
5923
5924 /**
5925  * btrfs_update_reserved_bytes - update the block_group and space info counters
5926  * @cache:      The cache we are manipulating
5927  * @num_bytes:  The number of bytes in question
5928  * @reserve:    One of the reservation enums
5929  * @delalloc:   The blocks are allocated for the delalloc write
5930  *
5931  * This is called by the allocator when it reserves space, or by somebody who is
5932  * freeing space that was never actually used on disk.  For example if you
5933  * reserve some space for a new leaf in transaction A and before transaction A
5934  * commits you free that leaf, you call this with reserve set to 0 in order to
5935  * clear the reservation.
5936  *
5937  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5938  * ENOSPC accounting.  For data we handle the reservation through clearing the
5939  * delalloc bits in the io_tree.  We have to do this since we could end up
5940  * allocating less disk space for the amount of data we have reserved in the
5941  * case of compression.
5942  *
5943  * If this is a reservation and the block group has become read only we cannot
5944  * make the reservation and return -EAGAIN, otherwise this function always
5945  * succeeds.
5946  */
5947 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5948                                        u64 num_bytes, int reserve, int delalloc)
5949 {
5950         struct btrfs_space_info *space_info = cache->space_info;
5951         int ret = 0;
5952
5953         spin_lock(&space_info->lock);
5954         spin_lock(&cache->lock);
5955         if (reserve != RESERVE_FREE) {
5956                 if (cache->ro) {
5957                         ret = -EAGAIN;
5958                 } else {
5959                         cache->reserved += num_bytes;
5960                         space_info->bytes_reserved += num_bytes;
5961                         if (reserve == RESERVE_ALLOC) {
5962                                 trace_btrfs_space_reservation(cache->fs_info,
5963                                                 "space_info", space_info->flags,
5964                                                 num_bytes, 0);
5965                                 space_info->bytes_may_use -= num_bytes;
5966                         }
5967
5968                         if (delalloc)
5969                                 cache->delalloc_bytes += num_bytes;
5970                 }
5971         } else {
5972                 if (cache->ro)
5973                         space_info->bytes_readonly += num_bytes;
5974                 cache->reserved -= num_bytes;
5975                 space_info->bytes_reserved -= num_bytes;
5976
5977                 if (delalloc)
5978                         cache->delalloc_bytes -= num_bytes;
5979         }
5980         spin_unlock(&cache->lock);
5981         spin_unlock(&space_info->lock);
5982         return ret;
5983 }
5984
5985 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5986                                 struct btrfs_root *root)
5987 {
5988         struct btrfs_fs_info *fs_info = root->fs_info;
5989         struct btrfs_caching_control *next;
5990         struct btrfs_caching_control *caching_ctl;
5991         struct btrfs_block_group_cache *cache;
5992
5993         down_write(&fs_info->commit_root_sem);
5994
5995         list_for_each_entry_safe(caching_ctl, next,
5996                                  &fs_info->caching_block_groups, list) {
5997                 cache = caching_ctl->block_group;
5998                 if (block_group_cache_done(cache)) {
5999                         cache->last_byte_to_unpin = (u64)-1;
6000                         list_del_init(&caching_ctl->list);
6001                         put_caching_control(caching_ctl);
6002                 } else {
6003                         cache->last_byte_to_unpin = caching_ctl->progress;
6004                 }
6005         }
6006
6007         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6008                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6009         else
6010                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6011
6012         up_write(&fs_info->commit_root_sem);
6013
6014         update_global_block_rsv(fs_info);
6015 }
6016
6017 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6018                               const bool return_free_space)
6019 {
6020         struct btrfs_fs_info *fs_info = root->fs_info;
6021         struct btrfs_block_group_cache *cache = NULL;
6022         struct btrfs_space_info *space_info;
6023         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6024         u64 len;
6025         bool readonly;
6026
6027         while (start <= end) {
6028                 readonly = false;
6029                 if (!cache ||
6030                     start >= cache->key.objectid + cache->key.offset) {
6031                         if (cache)
6032                                 btrfs_put_block_group(cache);
6033                         cache = btrfs_lookup_block_group(fs_info, start);
6034                         BUG_ON(!cache); /* Logic error */
6035                 }
6036
6037                 len = cache->key.objectid + cache->key.offset - start;
6038                 len = min(len, end + 1 - start);
6039
6040                 if (start < cache->last_byte_to_unpin) {
6041                         len = min(len, cache->last_byte_to_unpin - start);
6042                         if (return_free_space)
6043                                 btrfs_add_free_space(cache, start, len);
6044                 }
6045
6046                 start += len;
6047                 space_info = cache->space_info;
6048
6049                 spin_lock(&space_info->lock);
6050                 spin_lock(&cache->lock);
6051                 cache->pinned -= len;
6052                 space_info->bytes_pinned -= len;
6053                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6054                 if (cache->ro) {
6055                         space_info->bytes_readonly += len;
6056                         readonly = true;
6057                 }
6058                 spin_unlock(&cache->lock);
6059                 if (!readonly && global_rsv->space_info == space_info) {
6060                         spin_lock(&global_rsv->lock);
6061                         if (!global_rsv->full) {
6062                                 len = min(len, global_rsv->size -
6063                                           global_rsv->reserved);
6064                                 global_rsv->reserved += len;
6065                                 space_info->bytes_may_use += len;
6066                                 if (global_rsv->reserved >= global_rsv->size)
6067                                         global_rsv->full = 1;
6068                         }
6069                         spin_unlock(&global_rsv->lock);
6070                 }
6071                 spin_unlock(&space_info->lock);
6072         }
6073
6074         if (cache)
6075                 btrfs_put_block_group(cache);
6076         return 0;
6077 }
6078
6079 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6080                                struct btrfs_root *root)
6081 {
6082         struct btrfs_fs_info *fs_info = root->fs_info;
6083         struct extent_io_tree *unpin;
6084         u64 start;
6085         u64 end;
6086         int ret;
6087
6088         if (trans->aborted)
6089                 return 0;
6090
6091         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6092                 unpin = &fs_info->freed_extents[1];
6093         else
6094                 unpin = &fs_info->freed_extents[0];
6095
6096         while (1) {
6097                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6098                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6099                                             EXTENT_DIRTY, NULL);
6100                 if (ret) {
6101                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6102                         break;
6103                 }
6104
6105                 if (btrfs_test_opt(root, DISCARD))
6106                         ret = btrfs_discard_extent(root, start,
6107                                                    end + 1 - start, NULL);
6108
6109                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6110                 unpin_extent_range(root, start, end, true);
6111                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6112                 cond_resched();
6113         }
6114
6115         return 0;
6116 }
6117
6118 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6119                              u64 owner, u64 root_objectid)
6120 {
6121         struct btrfs_space_info *space_info;
6122         u64 flags;
6123
6124         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6125                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6126                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6127                 else
6128                         flags = BTRFS_BLOCK_GROUP_METADATA;
6129         } else {
6130                 flags = BTRFS_BLOCK_GROUP_DATA;
6131         }
6132
6133         space_info = __find_space_info(fs_info, flags);
6134         BUG_ON(!space_info); /* Logic bug */
6135         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6136 }
6137
6138
6139 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6140                                 struct btrfs_root *root,
6141                                 struct btrfs_delayed_ref_node *node, u64 parent,
6142                                 u64 root_objectid, u64 owner_objectid,
6143                                 u64 owner_offset, int refs_to_drop,
6144                                 struct btrfs_delayed_extent_op *extent_op)
6145 {
6146         struct btrfs_key key;
6147         struct btrfs_path *path;
6148         struct btrfs_fs_info *info = root->fs_info;
6149         struct btrfs_root *extent_root = info->extent_root;
6150         struct extent_buffer *leaf;
6151         struct btrfs_extent_item *ei;
6152         struct btrfs_extent_inline_ref *iref;
6153         int ret;
6154         int is_data;
6155         int extent_slot = 0;
6156         int found_extent = 0;
6157         int num_to_del = 1;
6158         int no_quota = node->no_quota;
6159         u32 item_size;
6160         u64 refs;
6161         u64 bytenr = node->bytenr;
6162         u64 num_bytes = node->num_bytes;
6163         int last_ref = 0;
6164         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6165                                                  SKINNY_METADATA);
6166
6167         if (!info->quota_enabled || !is_fstree(root_objectid))
6168                 no_quota = 1;
6169
6170         path = btrfs_alloc_path();
6171         if (!path)
6172                 return -ENOMEM;
6173
6174         path->reada = 1;
6175         path->leave_spinning = 1;
6176
6177         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6178         BUG_ON(!is_data && refs_to_drop != 1);
6179
6180         if (is_data)
6181                 skinny_metadata = 0;
6182
6183         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6184                                     bytenr, num_bytes, parent,
6185                                     root_objectid, owner_objectid,
6186                                     owner_offset);
6187         if (ret == 0) {
6188                 extent_slot = path->slots[0];
6189                 while (extent_slot >= 0) {
6190                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6191                                               extent_slot);
6192                         if (key.objectid != bytenr)
6193                                 break;
6194                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6195                             key.offset == num_bytes) {
6196                                 found_extent = 1;
6197                                 break;
6198                         }
6199                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6200                             key.offset == owner_objectid) {
6201                                 found_extent = 1;
6202                                 break;
6203                         }
6204                         if (path->slots[0] - extent_slot > 5)
6205                                 break;
6206                         extent_slot--;
6207                 }
6208 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6209                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6210                 if (found_extent && item_size < sizeof(*ei))
6211                         found_extent = 0;
6212 #endif
6213                 if (!found_extent) {
6214                         BUG_ON(iref);
6215                         ret = remove_extent_backref(trans, extent_root, path,
6216                                                     NULL, refs_to_drop,
6217                                                     is_data, &last_ref);
6218                         if (ret) {
6219                                 btrfs_abort_transaction(trans, extent_root, ret);
6220                                 goto out;
6221                         }
6222                         btrfs_release_path(path);
6223                         path->leave_spinning = 1;
6224
6225                         key.objectid = bytenr;
6226                         key.type = BTRFS_EXTENT_ITEM_KEY;
6227                         key.offset = num_bytes;
6228
6229                         if (!is_data && skinny_metadata) {
6230                                 key.type = BTRFS_METADATA_ITEM_KEY;
6231                                 key.offset = owner_objectid;
6232                         }
6233
6234                         ret = btrfs_search_slot(trans, extent_root,
6235                                                 &key, path, -1, 1);
6236                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6237                                 /*
6238                                  * Couldn't find our skinny metadata item,
6239                                  * see if we have ye olde extent item.
6240                                  */
6241                                 path->slots[0]--;
6242                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6243                                                       path->slots[0]);
6244                                 if (key.objectid == bytenr &&
6245                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6246                                     key.offset == num_bytes)
6247                                         ret = 0;
6248                         }
6249
6250                         if (ret > 0 && skinny_metadata) {
6251                                 skinny_metadata = false;
6252                                 key.objectid = bytenr;
6253                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6254                                 key.offset = num_bytes;
6255                                 btrfs_release_path(path);
6256                                 ret = btrfs_search_slot(trans, extent_root,
6257                                                         &key, path, -1, 1);
6258                         }
6259
6260                         if (ret) {
6261                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6262                                         ret, bytenr);
6263                                 if (ret > 0)
6264                                         btrfs_print_leaf(extent_root,
6265                                                          path->nodes[0]);
6266                         }
6267                         if (ret < 0) {
6268                                 btrfs_abort_transaction(trans, extent_root, ret);
6269                                 goto out;
6270                         }
6271                         extent_slot = path->slots[0];
6272                 }
6273         } else if (WARN_ON(ret == -ENOENT)) {
6274                 btrfs_print_leaf(extent_root, path->nodes[0]);
6275                 btrfs_err(info,
6276                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6277                         bytenr, parent, root_objectid, owner_objectid,
6278                         owner_offset);
6279                 btrfs_abort_transaction(trans, extent_root, ret);
6280                 goto out;
6281         } else {
6282                 btrfs_abort_transaction(trans, extent_root, ret);
6283                 goto out;
6284         }
6285
6286         leaf = path->nodes[0];
6287         item_size = btrfs_item_size_nr(leaf, extent_slot);
6288 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6289         if (item_size < sizeof(*ei)) {
6290                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6291                 ret = convert_extent_item_v0(trans, extent_root, path,
6292                                              owner_objectid, 0);
6293                 if (ret < 0) {
6294                         btrfs_abort_transaction(trans, extent_root, ret);
6295                         goto out;
6296                 }
6297
6298                 btrfs_release_path(path);
6299                 path->leave_spinning = 1;
6300
6301                 key.objectid = bytenr;
6302                 key.type = BTRFS_EXTENT_ITEM_KEY;
6303                 key.offset = num_bytes;
6304
6305                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6306                                         -1, 1);
6307                 if (ret) {
6308                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6309                                 ret, bytenr);
6310                         btrfs_print_leaf(extent_root, path->nodes[0]);
6311                 }
6312                 if (ret < 0) {
6313                         btrfs_abort_transaction(trans, extent_root, ret);
6314                         goto out;
6315                 }
6316
6317                 extent_slot = path->slots[0];
6318                 leaf = path->nodes[0];
6319                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6320         }
6321 #endif
6322         BUG_ON(item_size < sizeof(*ei));
6323         ei = btrfs_item_ptr(leaf, extent_slot,
6324                             struct btrfs_extent_item);
6325         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6326             key.type == BTRFS_EXTENT_ITEM_KEY) {
6327                 struct btrfs_tree_block_info *bi;
6328                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6329                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6330                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6331         }
6332
6333         refs = btrfs_extent_refs(leaf, ei);
6334         if (refs < refs_to_drop) {
6335                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6336                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6337                 ret = -EINVAL;
6338                 btrfs_abort_transaction(trans, extent_root, ret);
6339                 goto out;
6340         }
6341         refs -= refs_to_drop;
6342
6343         if (refs > 0) {
6344                 if (extent_op)
6345                         __run_delayed_extent_op(extent_op, leaf, ei);
6346                 /*
6347                  * In the case of inline back ref, reference count will
6348                  * be updated by remove_extent_backref
6349                  */
6350                 if (iref) {
6351                         BUG_ON(!found_extent);
6352                 } else {
6353                         btrfs_set_extent_refs(leaf, ei, refs);
6354                         btrfs_mark_buffer_dirty(leaf);
6355                 }
6356                 if (found_extent) {
6357                         ret = remove_extent_backref(trans, extent_root, path,
6358                                                     iref, refs_to_drop,
6359                                                     is_data, &last_ref);
6360                         if (ret) {
6361                                 btrfs_abort_transaction(trans, extent_root, ret);
6362                                 goto out;
6363                         }
6364                 }
6365                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6366                                  root_objectid);
6367         } else {
6368                 if (found_extent) {
6369                         BUG_ON(is_data && refs_to_drop !=
6370                                extent_data_ref_count(root, path, iref));
6371                         if (iref) {
6372                                 BUG_ON(path->slots[0] != extent_slot);
6373                         } else {
6374                                 BUG_ON(path->slots[0] != extent_slot + 1);
6375                                 path->slots[0] = extent_slot;
6376                                 num_to_del = 2;
6377                         }
6378                 }
6379
6380                 last_ref = 1;
6381                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6382                                       num_to_del);
6383                 if (ret) {
6384                         btrfs_abort_transaction(trans, extent_root, ret);
6385                         goto out;
6386                 }
6387                 btrfs_release_path(path);
6388
6389                 if (is_data) {
6390                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6391                         if (ret) {
6392                                 btrfs_abort_transaction(trans, extent_root, ret);
6393                                 goto out;
6394                         }
6395                 }
6396
6397                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6398                 if (ret) {
6399                         btrfs_abort_transaction(trans, extent_root, ret);
6400                         goto out;
6401                 }
6402         }
6403         btrfs_release_path(path);
6404
6405 out:
6406         btrfs_free_path(path);
6407         return ret;
6408 }
6409
6410 /*
6411  * when we free an block, it is possible (and likely) that we free the last
6412  * delayed ref for that extent as well.  This searches the delayed ref tree for
6413  * a given extent, and if there are no other delayed refs to be processed, it
6414  * removes it from the tree.
6415  */
6416 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6417                                       struct btrfs_root *root, u64 bytenr)
6418 {
6419         struct btrfs_delayed_ref_head *head;
6420         struct btrfs_delayed_ref_root *delayed_refs;
6421         int ret = 0;
6422
6423         delayed_refs = &trans->transaction->delayed_refs;
6424         spin_lock(&delayed_refs->lock);
6425         head = btrfs_find_delayed_ref_head(trans, bytenr);
6426         if (!head)
6427                 goto out_delayed_unlock;
6428
6429         spin_lock(&head->lock);
6430         if (!list_empty(&head->ref_list))
6431                 goto out;
6432
6433         if (head->extent_op) {
6434                 if (!head->must_insert_reserved)
6435                         goto out;
6436                 btrfs_free_delayed_extent_op(head->extent_op);
6437                 head->extent_op = NULL;
6438         }
6439
6440         /*
6441          * waiting for the lock here would deadlock.  If someone else has it
6442          * locked they are already in the process of dropping it anyway
6443          */
6444         if (!mutex_trylock(&head->mutex))
6445                 goto out;
6446
6447         /*
6448          * at this point we have a head with no other entries.  Go
6449          * ahead and process it.
6450          */
6451         head->node.in_tree = 0;
6452         rb_erase(&head->href_node, &delayed_refs->href_root);
6453
6454         atomic_dec(&delayed_refs->num_entries);
6455
6456         /*
6457          * we don't take a ref on the node because we're removing it from the
6458          * tree, so we just steal the ref the tree was holding.
6459          */
6460         delayed_refs->num_heads--;
6461         if (head->processing == 0)
6462                 delayed_refs->num_heads_ready--;
6463         head->processing = 0;
6464         spin_unlock(&head->lock);
6465         spin_unlock(&delayed_refs->lock);
6466
6467         BUG_ON(head->extent_op);
6468         if (head->must_insert_reserved)
6469                 ret = 1;
6470
6471         mutex_unlock(&head->mutex);
6472         btrfs_put_delayed_ref(&head->node);
6473         return ret;
6474 out:
6475         spin_unlock(&head->lock);
6476
6477 out_delayed_unlock:
6478         spin_unlock(&delayed_refs->lock);
6479         return 0;
6480 }
6481
6482 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6483                            struct btrfs_root *root,
6484                            struct extent_buffer *buf,
6485                            u64 parent, int last_ref)
6486 {
6487         int pin = 1;
6488         int ret;
6489
6490         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6491                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6492                                         buf->start, buf->len,
6493                                         parent, root->root_key.objectid,
6494                                         btrfs_header_level(buf),
6495                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6496                 BUG_ON(ret); /* -ENOMEM */
6497         }
6498
6499         if (!last_ref)
6500                 return;
6501
6502         if (btrfs_header_generation(buf) == trans->transid) {
6503                 struct btrfs_block_group_cache *cache;
6504
6505                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6506                         ret = check_ref_cleanup(trans, root, buf->start);
6507                         if (!ret)
6508                                 goto out;
6509                 }
6510
6511                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6512
6513                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6514                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6515                         btrfs_put_block_group(cache);
6516                         goto out;
6517                 }
6518
6519                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6520
6521                 btrfs_add_free_space(cache, buf->start, buf->len);
6522                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6523                 btrfs_put_block_group(cache);
6524                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6525                 pin = 0;
6526         }
6527 out:
6528         if (pin)
6529                 add_pinned_bytes(root->fs_info, buf->len,
6530                                  btrfs_header_level(buf),
6531                                  root->root_key.objectid);
6532
6533         /*
6534          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6535          * anymore.
6536          */
6537         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6538 }
6539
6540 /* Can return -ENOMEM */
6541 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6542                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6543                       u64 owner, u64 offset, int no_quota)
6544 {
6545         int ret;
6546         struct btrfs_fs_info *fs_info = root->fs_info;
6547
6548         if (btrfs_test_is_dummy_root(root))
6549                 return 0;
6550
6551         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6552
6553         /*
6554          * tree log blocks never actually go into the extent allocation
6555          * tree, just update pinning info and exit early.
6556          */
6557         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6558                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6559                 /* unlocks the pinned mutex */
6560                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6561                 ret = 0;
6562         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6563                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6564                                         num_bytes,
6565                                         parent, root_objectid, (int)owner,
6566                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6567         } else {
6568                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6569                                                 num_bytes,
6570                                                 parent, root_objectid, owner,
6571                                                 offset, BTRFS_DROP_DELAYED_REF,
6572                                                 NULL, no_quota);
6573         }
6574         return ret;
6575 }
6576
6577 /*
6578  * when we wait for progress in the block group caching, its because
6579  * our allocation attempt failed at least once.  So, we must sleep
6580  * and let some progress happen before we try again.
6581  *
6582  * This function will sleep at least once waiting for new free space to
6583  * show up, and then it will check the block group free space numbers
6584  * for our min num_bytes.  Another option is to have it go ahead
6585  * and look in the rbtree for a free extent of a given size, but this
6586  * is a good start.
6587  *
6588  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6589  * any of the information in this block group.
6590  */
6591 static noinline void
6592 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6593                                 u64 num_bytes)
6594 {
6595         struct btrfs_caching_control *caching_ctl;
6596
6597         caching_ctl = get_caching_control(cache);
6598         if (!caching_ctl)
6599                 return;
6600
6601         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6602                    (cache->free_space_ctl->free_space >= num_bytes));
6603
6604         put_caching_control(caching_ctl);
6605 }
6606
6607 static noinline int
6608 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6609 {
6610         struct btrfs_caching_control *caching_ctl;
6611         int ret = 0;
6612
6613         caching_ctl = get_caching_control(cache);
6614         if (!caching_ctl)
6615                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6616
6617         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6618         if (cache->cached == BTRFS_CACHE_ERROR)
6619                 ret = -EIO;
6620         put_caching_control(caching_ctl);
6621         return ret;
6622 }
6623
6624 int __get_raid_index(u64 flags)
6625 {
6626         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6627                 return BTRFS_RAID_RAID10;
6628         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6629                 return BTRFS_RAID_RAID1;
6630         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6631                 return BTRFS_RAID_DUP;
6632         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6633                 return BTRFS_RAID_RAID0;
6634         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6635                 return BTRFS_RAID_RAID5;
6636         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6637                 return BTRFS_RAID_RAID6;
6638
6639         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6640 }
6641
6642 int get_block_group_index(struct btrfs_block_group_cache *cache)
6643 {
6644         return __get_raid_index(cache->flags);
6645 }
6646
6647 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6648         [BTRFS_RAID_RAID10]     = "raid10",
6649         [BTRFS_RAID_RAID1]      = "raid1",
6650         [BTRFS_RAID_DUP]        = "dup",
6651         [BTRFS_RAID_RAID0]      = "raid0",
6652         [BTRFS_RAID_SINGLE]     = "single",
6653         [BTRFS_RAID_RAID5]      = "raid5",
6654         [BTRFS_RAID_RAID6]      = "raid6",
6655 };
6656
6657 static const char *get_raid_name(enum btrfs_raid_types type)
6658 {
6659         if (type >= BTRFS_NR_RAID_TYPES)
6660                 return NULL;
6661
6662         return btrfs_raid_type_names[type];
6663 }
6664
6665 enum btrfs_loop_type {
6666         LOOP_CACHING_NOWAIT = 0,
6667         LOOP_CACHING_WAIT = 1,
6668         LOOP_ALLOC_CHUNK = 2,
6669         LOOP_NO_EMPTY_SIZE = 3,
6670 };
6671
6672 static inline void
6673 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6674                        int delalloc)
6675 {
6676         if (delalloc)
6677                 down_read(&cache->data_rwsem);
6678 }
6679
6680 static inline void
6681 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6682                        int delalloc)
6683 {
6684         btrfs_get_block_group(cache);
6685         if (delalloc)
6686                 down_read(&cache->data_rwsem);
6687 }
6688
6689 static struct btrfs_block_group_cache *
6690 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6691                    struct btrfs_free_cluster *cluster,
6692                    int delalloc)
6693 {
6694         struct btrfs_block_group_cache *used_bg;
6695         bool locked = false;
6696 again:
6697         spin_lock(&cluster->refill_lock);
6698         if (locked) {
6699                 if (used_bg == cluster->block_group)
6700                         return used_bg;
6701
6702                 up_read(&used_bg->data_rwsem);
6703                 btrfs_put_block_group(used_bg);
6704         }
6705
6706         used_bg = cluster->block_group;
6707         if (!used_bg)
6708                 return NULL;
6709
6710         if (used_bg == block_group)
6711                 return used_bg;
6712
6713         btrfs_get_block_group(used_bg);
6714
6715         if (!delalloc)
6716                 return used_bg;
6717
6718         if (down_read_trylock(&used_bg->data_rwsem))
6719                 return used_bg;
6720
6721         spin_unlock(&cluster->refill_lock);
6722         down_read(&used_bg->data_rwsem);
6723         locked = true;
6724         goto again;
6725 }
6726
6727 static inline void
6728 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6729                          int delalloc)
6730 {
6731         if (delalloc)
6732                 up_read(&cache->data_rwsem);
6733         btrfs_put_block_group(cache);
6734 }
6735
6736 /*
6737  * walks the btree of allocated extents and find a hole of a given size.
6738  * The key ins is changed to record the hole:
6739  * ins->objectid == start position
6740  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6741  * ins->offset == the size of the hole.
6742  * Any available blocks before search_start are skipped.
6743  *
6744  * If there is no suitable free space, we will record the max size of
6745  * the free space extent currently.
6746  */
6747 static noinline int find_free_extent(struct btrfs_root *orig_root,
6748                                      u64 num_bytes, u64 empty_size,
6749                                      u64 hint_byte, struct btrfs_key *ins,
6750                                      u64 flags, int delalloc)
6751 {
6752         int ret = 0;
6753         struct btrfs_root *root = orig_root->fs_info->extent_root;
6754         struct btrfs_free_cluster *last_ptr = NULL;
6755         struct btrfs_block_group_cache *block_group = NULL;
6756         u64 search_start = 0;
6757         u64 max_extent_size = 0;
6758         int empty_cluster = 2 * 1024 * 1024;
6759         struct btrfs_space_info *space_info;
6760         int loop = 0;
6761         int index = __get_raid_index(flags);
6762         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6763                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6764         bool failed_cluster_refill = false;
6765         bool failed_alloc = false;
6766         bool use_cluster = true;
6767         bool have_caching_bg = false;
6768
6769         WARN_ON(num_bytes < root->sectorsize);
6770         ins->type = BTRFS_EXTENT_ITEM_KEY;
6771         ins->objectid = 0;
6772         ins->offset = 0;
6773
6774         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6775
6776         space_info = __find_space_info(root->fs_info, flags);
6777         if (!space_info) {
6778                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6779                 return -ENOSPC;
6780         }
6781
6782         /*
6783          * If the space info is for both data and metadata it means we have a
6784          * small filesystem and we can't use the clustering stuff.
6785          */
6786         if (btrfs_mixed_space_info(space_info))
6787                 use_cluster = false;
6788
6789         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6790                 last_ptr = &root->fs_info->meta_alloc_cluster;
6791                 if (!btrfs_test_opt(root, SSD))
6792                         empty_cluster = 64 * 1024;
6793         }
6794
6795         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6796             btrfs_test_opt(root, SSD)) {
6797                 last_ptr = &root->fs_info->data_alloc_cluster;
6798         }
6799
6800         if (last_ptr) {
6801                 spin_lock(&last_ptr->lock);
6802                 if (last_ptr->block_group)
6803                         hint_byte = last_ptr->window_start;
6804                 spin_unlock(&last_ptr->lock);
6805         }
6806
6807         search_start = max(search_start, first_logical_byte(root, 0));
6808         search_start = max(search_start, hint_byte);
6809
6810         if (!last_ptr)
6811                 empty_cluster = 0;
6812
6813         if (search_start == hint_byte) {
6814                 block_group = btrfs_lookup_block_group(root->fs_info,
6815                                                        search_start);
6816                 /*
6817                  * we don't want to use the block group if it doesn't match our
6818                  * allocation bits, or if its not cached.
6819                  *
6820                  * However if we are re-searching with an ideal block group
6821                  * picked out then we don't care that the block group is cached.
6822                  */
6823                 if (block_group && block_group_bits(block_group, flags) &&
6824                     block_group->cached != BTRFS_CACHE_NO) {
6825                         down_read(&space_info->groups_sem);
6826                         if (list_empty(&block_group->list) ||
6827                             block_group->ro) {
6828                                 /*
6829                                  * someone is removing this block group,
6830                                  * we can't jump into the have_block_group
6831                                  * target because our list pointers are not
6832                                  * valid
6833                                  */
6834                                 btrfs_put_block_group(block_group);
6835                                 up_read(&space_info->groups_sem);
6836                         } else {
6837                                 index = get_block_group_index(block_group);
6838                                 btrfs_lock_block_group(block_group, delalloc);
6839                                 goto have_block_group;
6840                         }
6841                 } else if (block_group) {
6842                         btrfs_put_block_group(block_group);
6843                 }
6844         }
6845 search:
6846         have_caching_bg = false;
6847         down_read(&space_info->groups_sem);
6848         list_for_each_entry(block_group, &space_info->block_groups[index],
6849                             list) {
6850                 u64 offset;
6851                 int cached;
6852
6853                 btrfs_grab_block_group(block_group, delalloc);
6854                 search_start = block_group->key.objectid;
6855
6856                 /*
6857                  * this can happen if we end up cycling through all the
6858                  * raid types, but we want to make sure we only allocate
6859                  * for the proper type.
6860                  */
6861                 if (!block_group_bits(block_group, flags)) {
6862                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
6863                                 BTRFS_BLOCK_GROUP_RAID1 |
6864                                 BTRFS_BLOCK_GROUP_RAID5 |
6865                                 BTRFS_BLOCK_GROUP_RAID6 |
6866                                 BTRFS_BLOCK_GROUP_RAID10;
6867
6868                         /*
6869                          * if they asked for extra copies and this block group
6870                          * doesn't provide them, bail.  This does allow us to
6871                          * fill raid0 from raid1.
6872                          */
6873                         if ((flags & extra) && !(block_group->flags & extra))
6874                                 goto loop;
6875                 }
6876
6877 have_block_group:
6878                 cached = block_group_cache_done(block_group);
6879                 if (unlikely(!cached)) {
6880                         ret = cache_block_group(block_group, 0);
6881                         BUG_ON(ret < 0);
6882                         ret = 0;
6883                 }
6884
6885                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6886                         goto loop;
6887                 if (unlikely(block_group->ro))
6888                         goto loop;
6889
6890                 /*
6891                  * Ok we want to try and use the cluster allocator, so
6892                  * lets look there
6893                  */
6894                 if (last_ptr) {
6895                         struct btrfs_block_group_cache *used_block_group;
6896                         unsigned long aligned_cluster;
6897                         /*
6898                          * the refill lock keeps out other
6899                          * people trying to start a new cluster
6900                          */
6901                         used_block_group = btrfs_lock_cluster(block_group,
6902                                                               last_ptr,
6903                                                               delalloc);
6904                         if (!used_block_group)
6905                                 goto refill_cluster;
6906
6907                         if (used_block_group != block_group &&
6908                             (used_block_group->ro ||
6909                              !block_group_bits(used_block_group, flags)))
6910                                 goto release_cluster;
6911
6912                         offset = btrfs_alloc_from_cluster(used_block_group,
6913                                                 last_ptr,
6914                                                 num_bytes,
6915                                                 used_block_group->key.objectid,
6916                                                 &max_extent_size);
6917                         if (offset) {
6918                                 /* we have a block, we're done */
6919                                 spin_unlock(&last_ptr->refill_lock);
6920                                 trace_btrfs_reserve_extent_cluster(root,
6921                                                 used_block_group,
6922                                                 search_start, num_bytes);
6923                                 if (used_block_group != block_group) {
6924                                         btrfs_release_block_group(block_group,
6925                                                                   delalloc);
6926                                         block_group = used_block_group;
6927                                 }
6928                                 goto checks;
6929                         }
6930
6931                         WARN_ON(last_ptr->block_group != used_block_group);
6932 release_cluster:
6933                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6934                          * set up a new clusters, so lets just skip it
6935                          * and let the allocator find whatever block
6936                          * it can find.  If we reach this point, we
6937                          * will have tried the cluster allocator
6938                          * plenty of times and not have found
6939                          * anything, so we are likely way too
6940                          * fragmented for the clustering stuff to find
6941                          * anything.
6942                          *
6943                          * However, if the cluster is taken from the
6944                          * current block group, release the cluster
6945                          * first, so that we stand a better chance of
6946                          * succeeding in the unclustered
6947                          * allocation.  */
6948                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6949                             used_block_group != block_group) {
6950                                 spin_unlock(&last_ptr->refill_lock);
6951                                 btrfs_release_block_group(used_block_group,
6952                                                           delalloc);
6953                                 goto unclustered_alloc;
6954                         }
6955
6956                         /*
6957                          * this cluster didn't work out, free it and
6958                          * start over
6959                          */
6960                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6961
6962                         if (used_block_group != block_group)
6963                                 btrfs_release_block_group(used_block_group,
6964                                                           delalloc);
6965 refill_cluster:
6966                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6967                                 spin_unlock(&last_ptr->refill_lock);
6968                                 goto unclustered_alloc;
6969                         }
6970
6971                         aligned_cluster = max_t(unsigned long,
6972                                                 empty_cluster + empty_size,
6973                                               block_group->full_stripe_len);
6974
6975                         /* allocate a cluster in this block group */
6976                         ret = btrfs_find_space_cluster(root, block_group,
6977                                                        last_ptr, search_start,
6978                                                        num_bytes,
6979                                                        aligned_cluster);
6980                         if (ret == 0) {
6981                                 /*
6982                                  * now pull our allocation out of this
6983                                  * cluster
6984                                  */
6985                                 offset = btrfs_alloc_from_cluster(block_group,
6986                                                         last_ptr,
6987                                                         num_bytes,
6988                                                         search_start,
6989                                                         &max_extent_size);
6990                                 if (offset) {
6991                                         /* we found one, proceed */
6992                                         spin_unlock(&last_ptr->refill_lock);
6993                                         trace_btrfs_reserve_extent_cluster(root,
6994                                                 block_group, search_start,
6995                                                 num_bytes);
6996                                         goto checks;
6997                                 }
6998                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6999                                    && !failed_cluster_refill) {
7000                                 spin_unlock(&last_ptr->refill_lock);
7001
7002                                 failed_cluster_refill = true;
7003                                 wait_block_group_cache_progress(block_group,
7004                                        num_bytes + empty_cluster + empty_size);
7005                                 goto have_block_group;
7006                         }
7007
7008                         /*
7009                          * at this point we either didn't find a cluster
7010                          * or we weren't able to allocate a block from our
7011                          * cluster.  Free the cluster we've been trying
7012                          * to use, and go to the next block group
7013                          */
7014                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7015                         spin_unlock(&last_ptr->refill_lock);
7016                         goto loop;
7017                 }
7018
7019 unclustered_alloc:
7020                 spin_lock(&block_group->free_space_ctl->tree_lock);
7021                 if (cached &&
7022                     block_group->free_space_ctl->free_space <
7023                     num_bytes + empty_cluster + empty_size) {
7024                         if (block_group->free_space_ctl->free_space >
7025                             max_extent_size)
7026                                 max_extent_size =
7027                                         block_group->free_space_ctl->free_space;
7028                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7029                         goto loop;
7030                 }
7031                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7032
7033                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7034                                                     num_bytes, empty_size,
7035                                                     &max_extent_size);
7036                 /*
7037                  * If we didn't find a chunk, and we haven't failed on this
7038                  * block group before, and this block group is in the middle of
7039                  * caching and we are ok with waiting, then go ahead and wait
7040                  * for progress to be made, and set failed_alloc to true.
7041                  *
7042                  * If failed_alloc is true then we've already waited on this
7043                  * block group once and should move on to the next block group.
7044                  */
7045                 if (!offset && !failed_alloc && !cached &&
7046                     loop > LOOP_CACHING_NOWAIT) {
7047                         wait_block_group_cache_progress(block_group,
7048                                                 num_bytes + empty_size);
7049                         failed_alloc = true;
7050                         goto have_block_group;
7051                 } else if (!offset) {
7052                         if (!cached)
7053                                 have_caching_bg = true;
7054                         goto loop;
7055                 }
7056 checks:
7057                 search_start = ALIGN(offset, root->stripesize);
7058
7059                 /* move on to the next group */
7060                 if (search_start + num_bytes >
7061                     block_group->key.objectid + block_group->key.offset) {
7062                         btrfs_add_free_space(block_group, offset, num_bytes);
7063                         goto loop;
7064                 }
7065
7066                 if (offset < search_start)
7067                         btrfs_add_free_space(block_group, offset,
7068                                              search_start - offset);
7069                 BUG_ON(offset > search_start);
7070
7071                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7072                                                   alloc_type, delalloc);
7073                 if (ret == -EAGAIN) {
7074                         btrfs_add_free_space(block_group, offset, num_bytes);
7075                         goto loop;
7076                 }
7077
7078                 /* we are all good, lets return */
7079                 ins->objectid = search_start;
7080                 ins->offset = num_bytes;
7081
7082                 trace_btrfs_reserve_extent(orig_root, block_group,
7083                                            search_start, num_bytes);
7084                 btrfs_release_block_group(block_group, delalloc);
7085                 break;
7086 loop:
7087                 failed_cluster_refill = false;
7088                 failed_alloc = false;
7089                 BUG_ON(index != get_block_group_index(block_group));
7090                 btrfs_release_block_group(block_group, delalloc);
7091         }
7092         up_read(&space_info->groups_sem);
7093
7094         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7095                 goto search;
7096
7097         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7098                 goto search;
7099
7100         /*
7101          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7102          *                      caching kthreads as we move along
7103          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7104          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7105          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7106          *                      again
7107          */
7108         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7109                 index = 0;
7110                 loop++;
7111                 if (loop == LOOP_ALLOC_CHUNK) {
7112                         struct btrfs_trans_handle *trans;
7113                         int exist = 0;
7114
7115                         trans = current->journal_info;
7116                         if (trans)
7117                                 exist = 1;
7118                         else
7119                                 trans = btrfs_join_transaction(root);
7120
7121                         if (IS_ERR(trans)) {
7122                                 ret = PTR_ERR(trans);
7123                                 goto out;
7124                         }
7125
7126                         ret = do_chunk_alloc(trans, root, flags,
7127                                              CHUNK_ALLOC_FORCE);
7128                         /*
7129                          * Do not bail out on ENOSPC since we
7130                          * can do more things.
7131                          */
7132                         if (ret < 0 && ret != -ENOSPC)
7133                                 btrfs_abort_transaction(trans,
7134                                                         root, ret);
7135                         else
7136                                 ret = 0;
7137                         if (!exist)
7138                                 btrfs_end_transaction(trans, root);
7139                         if (ret)
7140                                 goto out;
7141                 }
7142
7143                 if (loop == LOOP_NO_EMPTY_SIZE) {
7144                         empty_size = 0;
7145                         empty_cluster = 0;
7146                 }
7147
7148                 goto search;
7149         } else if (!ins->objectid) {
7150                 ret = -ENOSPC;
7151         } else if (ins->objectid) {
7152                 ret = 0;
7153         }
7154 out:
7155         if (ret == -ENOSPC)
7156                 ins->offset = max_extent_size;
7157         return ret;
7158 }
7159
7160 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7161                             int dump_block_groups)
7162 {
7163         struct btrfs_block_group_cache *cache;
7164         int index = 0;
7165
7166         spin_lock(&info->lock);
7167         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7168                info->flags,
7169                info->total_bytes - info->bytes_used - info->bytes_pinned -
7170                info->bytes_reserved - info->bytes_readonly,
7171                (info->full) ? "" : "not ");
7172         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7173                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7174                info->total_bytes, info->bytes_used, info->bytes_pinned,
7175                info->bytes_reserved, info->bytes_may_use,
7176                info->bytes_readonly);
7177         spin_unlock(&info->lock);
7178
7179         if (!dump_block_groups)
7180                 return;
7181
7182         down_read(&info->groups_sem);
7183 again:
7184         list_for_each_entry(cache, &info->block_groups[index], list) {
7185                 spin_lock(&cache->lock);
7186                 printk(KERN_INFO "BTRFS: "
7187                            "block group %llu has %llu bytes, "
7188                            "%llu used %llu pinned %llu reserved %s\n",
7189                        cache->key.objectid, cache->key.offset,
7190                        btrfs_block_group_used(&cache->item), cache->pinned,
7191                        cache->reserved, cache->ro ? "[readonly]" : "");
7192                 btrfs_dump_free_space(cache, bytes);
7193                 spin_unlock(&cache->lock);
7194         }
7195         if (++index < BTRFS_NR_RAID_TYPES)
7196                 goto again;
7197         up_read(&info->groups_sem);
7198 }
7199
7200 int btrfs_reserve_extent(struct btrfs_root *root,
7201                          u64 num_bytes, u64 min_alloc_size,
7202                          u64 empty_size, u64 hint_byte,
7203                          struct btrfs_key *ins, int is_data, int delalloc)
7204 {
7205         bool final_tried = false;
7206         u64 flags;
7207         int ret;
7208
7209         flags = btrfs_get_alloc_profile(root, is_data);
7210 again:
7211         WARN_ON(num_bytes < root->sectorsize);
7212         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7213                                flags, delalloc);
7214
7215         if (ret == -ENOSPC) {
7216                 if (!final_tried && ins->offset) {
7217                         num_bytes = min(num_bytes >> 1, ins->offset);
7218                         num_bytes = round_down(num_bytes, root->sectorsize);
7219                         num_bytes = max(num_bytes, min_alloc_size);
7220                         if (num_bytes == min_alloc_size)
7221                                 final_tried = true;
7222                         goto again;
7223                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7224                         struct btrfs_space_info *sinfo;
7225
7226                         sinfo = __find_space_info(root->fs_info, flags);
7227                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7228                                 flags, num_bytes);
7229                         if (sinfo)
7230                                 dump_space_info(sinfo, num_bytes, 1);
7231                 }
7232         }
7233
7234         return ret;
7235 }
7236
7237 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7238                                         u64 start, u64 len,
7239                                         int pin, int delalloc)
7240 {
7241         struct btrfs_block_group_cache *cache;
7242         int ret = 0;
7243
7244         cache = btrfs_lookup_block_group(root->fs_info, start);
7245         if (!cache) {
7246                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7247                         start);
7248                 return -ENOSPC;
7249         }
7250
7251         if (pin)
7252                 pin_down_extent(root, cache, start, len, 1);
7253         else {
7254                 if (btrfs_test_opt(root, DISCARD))
7255                         ret = btrfs_discard_extent(root, start, len, NULL);
7256                 btrfs_add_free_space(cache, start, len);
7257                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7258         }
7259
7260         btrfs_put_block_group(cache);
7261
7262         trace_btrfs_reserved_extent_free(root, start, len);
7263
7264         return ret;
7265 }
7266
7267 int btrfs_free_reserved_extent(struct btrfs_root *root,
7268                                u64 start, u64 len, int delalloc)
7269 {
7270         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7271 }
7272
7273 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7274                                        u64 start, u64 len)
7275 {
7276         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7277 }
7278
7279 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7280                                       struct btrfs_root *root,
7281                                       u64 parent, u64 root_objectid,
7282                                       u64 flags, u64 owner, u64 offset,
7283                                       struct btrfs_key *ins, int ref_mod)
7284 {
7285         int ret;
7286         struct btrfs_fs_info *fs_info = root->fs_info;
7287         struct btrfs_extent_item *extent_item;
7288         struct btrfs_extent_inline_ref *iref;
7289         struct btrfs_path *path;
7290         struct extent_buffer *leaf;
7291         int type;
7292         u32 size;
7293
7294         if (parent > 0)
7295                 type = BTRFS_SHARED_DATA_REF_KEY;
7296         else
7297                 type = BTRFS_EXTENT_DATA_REF_KEY;
7298
7299         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7300
7301         path = btrfs_alloc_path();
7302         if (!path)
7303                 return -ENOMEM;
7304
7305         path->leave_spinning = 1;
7306         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7307                                       ins, size);
7308         if (ret) {
7309                 btrfs_free_path(path);
7310                 return ret;
7311         }
7312
7313         leaf = path->nodes[0];
7314         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7315                                      struct btrfs_extent_item);
7316         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7317         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7318         btrfs_set_extent_flags(leaf, extent_item,
7319                                flags | BTRFS_EXTENT_FLAG_DATA);
7320
7321         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7322         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7323         if (parent > 0) {
7324                 struct btrfs_shared_data_ref *ref;
7325                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7326                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7327                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7328         } else {
7329                 struct btrfs_extent_data_ref *ref;
7330                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7331                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7332                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7333                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7334                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7335         }
7336
7337         btrfs_mark_buffer_dirty(path->nodes[0]);
7338         btrfs_free_path(path);
7339
7340         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7341         if (ret) { /* -ENOENT, logic error */
7342                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7343                         ins->objectid, ins->offset);
7344                 BUG();
7345         }
7346         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7347         return ret;
7348 }
7349
7350 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7351                                      struct btrfs_root *root,
7352                                      u64 parent, u64 root_objectid,
7353                                      u64 flags, struct btrfs_disk_key *key,
7354                                      int level, struct btrfs_key *ins,
7355                                      int no_quota)
7356 {
7357         int ret;
7358         struct btrfs_fs_info *fs_info = root->fs_info;
7359         struct btrfs_extent_item *extent_item;
7360         struct btrfs_tree_block_info *block_info;
7361         struct btrfs_extent_inline_ref *iref;
7362         struct btrfs_path *path;
7363         struct extent_buffer *leaf;
7364         u32 size = sizeof(*extent_item) + sizeof(*iref);
7365         u64 num_bytes = ins->offset;
7366         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7367                                                  SKINNY_METADATA);
7368
7369         if (!skinny_metadata)
7370                 size += sizeof(*block_info);
7371
7372         path = btrfs_alloc_path();
7373         if (!path) {
7374                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7375                                                    root->nodesize);
7376                 return -ENOMEM;
7377         }
7378
7379         path->leave_spinning = 1;
7380         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7381                                       ins, size);
7382         if (ret) {
7383                 btrfs_free_path(path);
7384                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7385                                                    root->nodesize);
7386                 return ret;
7387         }
7388
7389         leaf = path->nodes[0];
7390         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7391                                      struct btrfs_extent_item);
7392         btrfs_set_extent_refs(leaf, extent_item, 1);
7393         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7394         btrfs_set_extent_flags(leaf, extent_item,
7395                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7396
7397         if (skinny_metadata) {
7398                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7399                 num_bytes = root->nodesize;
7400         } else {
7401                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7402                 btrfs_set_tree_block_key(leaf, block_info, key);
7403                 btrfs_set_tree_block_level(leaf, block_info, level);
7404                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7405         }
7406
7407         if (parent > 0) {
7408                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7409                 btrfs_set_extent_inline_ref_type(leaf, iref,
7410                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7411                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7412         } else {
7413                 btrfs_set_extent_inline_ref_type(leaf, iref,
7414                                                  BTRFS_TREE_BLOCK_REF_KEY);
7415                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7416         }
7417
7418         btrfs_mark_buffer_dirty(leaf);
7419         btrfs_free_path(path);
7420
7421         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7422                                  1);
7423         if (ret) { /* -ENOENT, logic error */
7424                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7425                         ins->objectid, ins->offset);
7426                 BUG();
7427         }
7428
7429         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7430         return ret;
7431 }
7432
7433 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7434                                      struct btrfs_root *root,
7435                                      u64 root_objectid, u64 owner,
7436                                      u64 offset, struct btrfs_key *ins)
7437 {
7438         int ret;
7439
7440         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7441
7442         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7443                                          ins->offset, 0,
7444                                          root_objectid, owner, offset,
7445                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7446         return ret;
7447 }
7448
7449 /*
7450  * this is used by the tree logging recovery code.  It records that
7451  * an extent has been allocated and makes sure to clear the free
7452  * space cache bits as well
7453  */
7454 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7455                                    struct btrfs_root *root,
7456                                    u64 root_objectid, u64 owner, u64 offset,
7457                                    struct btrfs_key *ins)
7458 {
7459         int ret;
7460         struct btrfs_block_group_cache *block_group;
7461
7462         /*
7463          * Mixed block groups will exclude before processing the log so we only
7464          * need to do the exlude dance if this fs isn't mixed.
7465          */
7466         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7467                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7468                 if (ret)
7469                         return ret;
7470         }
7471
7472         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7473         if (!block_group)
7474                 return -EINVAL;
7475
7476         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7477                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7478         BUG_ON(ret); /* logic error */
7479         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7480                                          0, owner, offset, ins, 1);
7481         btrfs_put_block_group(block_group);
7482         return ret;
7483 }
7484
7485 static struct extent_buffer *
7486 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7487                       u64 bytenr, int level)
7488 {
7489         struct extent_buffer *buf;
7490
7491         buf = btrfs_find_create_tree_block(root, bytenr);
7492         if (!buf)
7493                 return ERR_PTR(-ENOMEM);
7494         btrfs_set_header_generation(buf, trans->transid);
7495         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7496         btrfs_tree_lock(buf);
7497         clean_tree_block(trans, root->fs_info, buf);
7498         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7499
7500         btrfs_set_lock_blocking(buf);
7501         btrfs_set_buffer_uptodate(buf);
7502
7503         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7504                 buf->log_index = root->log_transid % 2;
7505                 /*
7506                  * we allow two log transactions at a time, use different
7507                  * EXENT bit to differentiate dirty pages.
7508                  */
7509                 if (buf->log_index == 0)
7510                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7511                                         buf->start + buf->len - 1, GFP_NOFS);
7512                 else
7513                         set_extent_new(&root->dirty_log_pages, buf->start,
7514                                         buf->start + buf->len - 1, GFP_NOFS);
7515         } else {
7516                 buf->log_index = -1;
7517                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7518                          buf->start + buf->len - 1, GFP_NOFS);
7519         }
7520         trans->blocks_used++;
7521         /* this returns a buffer locked for blocking */
7522         return buf;
7523 }
7524
7525 static struct btrfs_block_rsv *
7526 use_block_rsv(struct btrfs_trans_handle *trans,
7527               struct btrfs_root *root, u32 blocksize)
7528 {
7529         struct btrfs_block_rsv *block_rsv;
7530         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7531         int ret;
7532         bool global_updated = false;
7533
7534         block_rsv = get_block_rsv(trans, root);
7535
7536         if (unlikely(block_rsv->size == 0))
7537                 goto try_reserve;
7538 again:
7539         ret = block_rsv_use_bytes(block_rsv, blocksize);
7540         if (!ret)
7541                 return block_rsv;
7542
7543         if (block_rsv->failfast)
7544                 return ERR_PTR(ret);
7545
7546         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7547                 global_updated = true;
7548                 update_global_block_rsv(root->fs_info);
7549                 goto again;
7550         }
7551
7552         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7553                 static DEFINE_RATELIMIT_STATE(_rs,
7554                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7555                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7556                 if (__ratelimit(&_rs))
7557                         WARN(1, KERN_DEBUG
7558                                 "BTRFS: block rsv returned %d\n", ret);
7559         }
7560 try_reserve:
7561         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7562                                      BTRFS_RESERVE_NO_FLUSH);
7563         if (!ret)
7564                 return block_rsv;
7565         /*
7566          * If we couldn't reserve metadata bytes try and use some from
7567          * the global reserve if its space type is the same as the global
7568          * reservation.
7569          */
7570         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7571             block_rsv->space_info == global_rsv->space_info) {
7572                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7573                 if (!ret)
7574                         return global_rsv;
7575         }
7576         return ERR_PTR(ret);
7577 }
7578
7579 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7580                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7581 {
7582         block_rsv_add_bytes(block_rsv, blocksize, 0);
7583         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7584 }
7585
7586 /*
7587  * finds a free extent and does all the dirty work required for allocation
7588  * returns the key for the extent through ins, and a tree buffer for
7589  * the first block of the extent through buf.
7590  *
7591  * returns the tree buffer or an ERR_PTR on error.
7592  */
7593 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7594                                         struct btrfs_root *root,
7595                                         u64 parent, u64 root_objectid,
7596                                         struct btrfs_disk_key *key, int level,
7597                                         u64 hint, u64 empty_size)
7598 {
7599         struct btrfs_key ins;
7600         struct btrfs_block_rsv *block_rsv;
7601         struct extent_buffer *buf;
7602         struct btrfs_delayed_extent_op *extent_op;
7603         u64 flags = 0;
7604         int ret;
7605         u32 blocksize = root->nodesize;
7606         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7607                                                  SKINNY_METADATA);
7608
7609         if (btrfs_test_is_dummy_root(root)) {
7610                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7611                                             level);
7612                 if (!IS_ERR(buf))
7613                         root->alloc_bytenr += blocksize;
7614                 return buf;
7615         }
7616
7617         block_rsv = use_block_rsv(trans, root, blocksize);
7618         if (IS_ERR(block_rsv))
7619                 return ERR_CAST(block_rsv);
7620
7621         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7622                                    empty_size, hint, &ins, 0, 0);
7623         if (ret)
7624                 goto out_unuse;
7625
7626         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7627         if (IS_ERR(buf)) {
7628                 ret = PTR_ERR(buf);
7629                 goto out_free_reserved;
7630         }
7631
7632         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7633                 if (parent == 0)
7634                         parent = ins.objectid;
7635                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7636         } else
7637                 BUG_ON(parent > 0);
7638
7639         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7640                 extent_op = btrfs_alloc_delayed_extent_op();
7641                 if (!extent_op) {
7642                         ret = -ENOMEM;
7643                         goto out_free_buf;
7644                 }
7645                 if (key)
7646                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7647                 else
7648                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7649                 extent_op->flags_to_set = flags;
7650                 if (skinny_metadata)
7651                         extent_op->update_key = 0;
7652                 else
7653                         extent_op->update_key = 1;
7654                 extent_op->update_flags = 1;
7655                 extent_op->is_data = 0;
7656                 extent_op->level = level;
7657
7658                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7659                                                  ins.objectid, ins.offset,
7660                                                  parent, root_objectid, level,
7661                                                  BTRFS_ADD_DELAYED_EXTENT,
7662                                                  extent_op, 0);
7663                 if (ret)
7664                         goto out_free_delayed;
7665         }
7666         return buf;
7667
7668 out_free_delayed:
7669         btrfs_free_delayed_extent_op(extent_op);
7670 out_free_buf:
7671         free_extent_buffer(buf);
7672 out_free_reserved:
7673         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7674 out_unuse:
7675         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7676         return ERR_PTR(ret);
7677 }
7678
7679 struct walk_control {
7680         u64 refs[BTRFS_MAX_LEVEL];
7681         u64 flags[BTRFS_MAX_LEVEL];
7682         struct btrfs_key update_progress;
7683         int stage;
7684         int level;
7685         int shared_level;
7686         int update_ref;
7687         int keep_locks;
7688         int reada_slot;
7689         int reada_count;
7690         int for_reloc;
7691 };
7692
7693 #define DROP_REFERENCE  1
7694 #define UPDATE_BACKREF  2
7695
7696 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7697                                      struct btrfs_root *root,
7698                                      struct walk_control *wc,
7699                                      struct btrfs_path *path)
7700 {
7701         u64 bytenr;
7702         u64 generation;
7703         u64 refs;
7704         u64 flags;
7705         u32 nritems;
7706         u32 blocksize;
7707         struct btrfs_key key;
7708         struct extent_buffer *eb;
7709         int ret;
7710         int slot;
7711         int nread = 0;
7712
7713         if (path->slots[wc->level] < wc->reada_slot) {
7714                 wc->reada_count = wc->reada_count * 2 / 3;
7715                 wc->reada_count = max(wc->reada_count, 2);
7716         } else {
7717                 wc->reada_count = wc->reada_count * 3 / 2;
7718                 wc->reada_count = min_t(int, wc->reada_count,
7719                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7720         }
7721
7722         eb = path->nodes[wc->level];
7723         nritems = btrfs_header_nritems(eb);
7724         blocksize = root->nodesize;
7725
7726         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7727                 if (nread >= wc->reada_count)
7728                         break;
7729
7730                 cond_resched();
7731                 bytenr = btrfs_node_blockptr(eb, slot);
7732                 generation = btrfs_node_ptr_generation(eb, slot);
7733
7734                 if (slot == path->slots[wc->level])
7735                         goto reada;
7736
7737                 if (wc->stage == UPDATE_BACKREF &&
7738                     generation <= root->root_key.offset)
7739                         continue;
7740
7741                 /* We don't lock the tree block, it's OK to be racy here */
7742                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7743                                                wc->level - 1, 1, &refs,
7744                                                &flags);
7745                 /* We don't care about errors in readahead. */
7746                 if (ret < 0)
7747                         continue;
7748                 BUG_ON(refs == 0);
7749
7750                 if (wc->stage == DROP_REFERENCE) {
7751                         if (refs == 1)
7752                                 goto reada;
7753
7754                         if (wc->level == 1 &&
7755                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7756                                 continue;
7757                         if (!wc->update_ref ||
7758                             generation <= root->root_key.offset)
7759                                 continue;
7760                         btrfs_node_key_to_cpu(eb, &key, slot);
7761                         ret = btrfs_comp_cpu_keys(&key,
7762                                                   &wc->update_progress);
7763                         if (ret < 0)
7764                                 continue;
7765                 } else {
7766                         if (wc->level == 1 &&
7767                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7768                                 continue;
7769                 }
7770 reada:
7771                 readahead_tree_block(root, bytenr);
7772                 nread++;
7773         }
7774         wc->reada_slot = slot;
7775 }
7776
7777 /*
7778  * TODO: Modify related function to add related node/leaf to dirty_extent_root,
7779  * for later qgroup accounting.
7780  *
7781  * Current, this function does nothing.
7782  */
7783 static int account_leaf_items(struct btrfs_trans_handle *trans,
7784                               struct btrfs_root *root,
7785                               struct extent_buffer *eb)
7786 {
7787         int nr = btrfs_header_nritems(eb);
7788         int i, extent_type;
7789         struct btrfs_key key;
7790         struct btrfs_file_extent_item *fi;
7791         u64 bytenr, num_bytes;
7792
7793         for (i = 0; i < nr; i++) {
7794                 btrfs_item_key_to_cpu(eb, &key, i);
7795
7796                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7797                         continue;
7798
7799                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7800                 /* filter out non qgroup-accountable extents  */
7801                 extent_type = btrfs_file_extent_type(eb, fi);
7802
7803                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7804                         continue;
7805
7806                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7807                 if (!bytenr)
7808                         continue;
7809
7810                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7811         }
7812         return 0;
7813 }
7814
7815 /*
7816  * Walk up the tree from the bottom, freeing leaves and any interior
7817  * nodes which have had all slots visited. If a node (leaf or
7818  * interior) is freed, the node above it will have it's slot
7819  * incremented. The root node will never be freed.
7820  *
7821  * At the end of this function, we should have a path which has all
7822  * slots incremented to the next position for a search. If we need to
7823  * read a new node it will be NULL and the node above it will have the
7824  * correct slot selected for a later read.
7825  *
7826  * If we increment the root nodes slot counter past the number of
7827  * elements, 1 is returned to signal completion of the search.
7828  */
7829 static int adjust_slots_upwards(struct btrfs_root *root,
7830                                 struct btrfs_path *path, int root_level)
7831 {
7832         int level = 0;
7833         int nr, slot;
7834         struct extent_buffer *eb;
7835
7836         if (root_level == 0)
7837                 return 1;
7838
7839         while (level <= root_level) {
7840                 eb = path->nodes[level];
7841                 nr = btrfs_header_nritems(eb);
7842                 path->slots[level]++;
7843                 slot = path->slots[level];
7844                 if (slot >= nr || level == 0) {
7845                         /*
7846                          * Don't free the root -  we will detect this
7847                          * condition after our loop and return a
7848                          * positive value for caller to stop walking the tree.
7849                          */
7850                         if (level != root_level) {
7851                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7852                                 path->locks[level] = 0;
7853
7854                                 free_extent_buffer(eb);
7855                                 path->nodes[level] = NULL;
7856                                 path->slots[level] = 0;
7857                         }
7858                 } else {
7859                         /*
7860                          * We have a valid slot to walk back down
7861                          * from. Stop here so caller can process these
7862                          * new nodes.
7863                          */
7864                         break;
7865                 }
7866
7867                 level++;
7868         }
7869
7870         eb = path->nodes[root_level];
7871         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7872                 return 1;
7873
7874         return 0;
7875 }
7876
7877 /*
7878  * root_eb is the subtree root and is locked before this function is called.
7879  * TODO: Modify this function to mark all (including complete shared node)
7880  * to dirty_extent_root to allow it get accounted in qgroup.
7881  */
7882 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7883                                   struct btrfs_root *root,
7884                                   struct extent_buffer *root_eb,
7885                                   u64 root_gen,
7886                                   int root_level)
7887 {
7888         int ret = 0;
7889         int level;
7890         struct extent_buffer *eb = root_eb;
7891         struct btrfs_path *path = NULL;
7892
7893         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7894         BUG_ON(root_eb == NULL);
7895
7896         if (!root->fs_info->quota_enabled)
7897                 return 0;
7898
7899         if (!extent_buffer_uptodate(root_eb)) {
7900                 ret = btrfs_read_buffer(root_eb, root_gen);
7901                 if (ret)
7902                         goto out;
7903         }
7904
7905         if (root_level == 0) {
7906                 ret = account_leaf_items(trans, root, root_eb);
7907                 goto out;
7908         }
7909
7910         path = btrfs_alloc_path();
7911         if (!path)
7912                 return -ENOMEM;
7913
7914         /*
7915          * Walk down the tree.  Missing extent blocks are filled in as
7916          * we go. Metadata is accounted every time we read a new
7917          * extent block.
7918          *
7919          * When we reach a leaf, we account for file extent items in it,
7920          * walk back up the tree (adjusting slot pointers as we go)
7921          * and restart the search process.
7922          */
7923         extent_buffer_get(root_eb); /* For path */
7924         path->nodes[root_level] = root_eb;
7925         path->slots[root_level] = 0;
7926         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7927 walk_down:
7928         level = root_level;
7929         while (level >= 0) {
7930                 if (path->nodes[level] == NULL) {
7931                         int parent_slot;
7932                         u64 child_gen;
7933                         u64 child_bytenr;
7934
7935                         /* We need to get child blockptr/gen from
7936                          * parent before we can read it. */
7937                         eb = path->nodes[level + 1];
7938                         parent_slot = path->slots[level + 1];
7939                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7940                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7941
7942                         eb = read_tree_block(root, child_bytenr, child_gen);
7943                         if (IS_ERR(eb)) {
7944                                 ret = PTR_ERR(eb);
7945                                 goto out;
7946                         } else if (!extent_buffer_uptodate(eb)) {
7947                                 free_extent_buffer(eb);
7948                                 ret = -EIO;
7949                                 goto out;
7950                         }
7951
7952                         path->nodes[level] = eb;
7953                         path->slots[level] = 0;
7954
7955                         btrfs_tree_read_lock(eb);
7956                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7957                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7958                 }
7959
7960                 if (level == 0) {
7961                         ret = account_leaf_items(trans, root, path->nodes[level]);
7962                         if (ret)
7963                                 goto out;
7964
7965                         /* Nonzero return here means we completed our search */
7966                         ret = adjust_slots_upwards(root, path, root_level);
7967                         if (ret)
7968                                 break;
7969
7970                         /* Restart search with new slots */
7971                         goto walk_down;
7972                 }
7973
7974                 level--;
7975         }
7976
7977         ret = 0;
7978 out:
7979         btrfs_free_path(path);
7980
7981         return ret;
7982 }
7983
7984 /*
7985  * helper to process tree block while walking down the tree.
7986  *
7987  * when wc->stage == UPDATE_BACKREF, this function updates
7988  * back refs for pointers in the block.
7989  *
7990  * NOTE: return value 1 means we should stop walking down.
7991  */
7992 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7993                                    struct btrfs_root *root,
7994                                    struct btrfs_path *path,
7995                                    struct walk_control *wc, int lookup_info)
7996 {
7997         int level = wc->level;
7998         struct extent_buffer *eb = path->nodes[level];
7999         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8000         int ret;
8001
8002         if (wc->stage == UPDATE_BACKREF &&
8003             btrfs_header_owner(eb) != root->root_key.objectid)
8004                 return 1;
8005
8006         /*
8007          * when reference count of tree block is 1, it won't increase
8008          * again. once full backref flag is set, we never clear it.
8009          */
8010         if (lookup_info &&
8011             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8012              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8013                 BUG_ON(!path->locks[level]);
8014                 ret = btrfs_lookup_extent_info(trans, root,
8015                                                eb->start, level, 1,
8016                                                &wc->refs[level],
8017                                                &wc->flags[level]);
8018                 BUG_ON(ret == -ENOMEM);
8019                 if (ret)
8020                         return ret;
8021                 BUG_ON(wc->refs[level] == 0);
8022         }
8023
8024         if (wc->stage == DROP_REFERENCE) {
8025                 if (wc->refs[level] > 1)
8026                         return 1;
8027
8028                 if (path->locks[level] && !wc->keep_locks) {
8029                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8030                         path->locks[level] = 0;
8031                 }
8032                 return 0;
8033         }
8034
8035         /* wc->stage == UPDATE_BACKREF */
8036         if (!(wc->flags[level] & flag)) {
8037                 BUG_ON(!path->locks[level]);
8038                 ret = btrfs_inc_ref(trans, root, eb, 1);
8039                 BUG_ON(ret); /* -ENOMEM */
8040                 ret = btrfs_dec_ref(trans, root, eb, 0);
8041                 BUG_ON(ret); /* -ENOMEM */
8042                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8043                                                   eb->len, flag,
8044                                                   btrfs_header_level(eb), 0);
8045                 BUG_ON(ret); /* -ENOMEM */
8046                 wc->flags[level] |= flag;
8047         }
8048
8049         /*
8050          * the block is shared by multiple trees, so it's not good to
8051          * keep the tree lock
8052          */
8053         if (path->locks[level] && level > 0) {
8054                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8055                 path->locks[level] = 0;
8056         }
8057         return 0;
8058 }
8059
8060 /*
8061  * helper to process tree block pointer.
8062  *
8063  * when wc->stage == DROP_REFERENCE, this function checks
8064  * reference count of the block pointed to. if the block
8065  * is shared and we need update back refs for the subtree
8066  * rooted at the block, this function changes wc->stage to
8067  * UPDATE_BACKREF. if the block is shared and there is no
8068  * need to update back, this function drops the reference
8069  * to the block.
8070  *
8071  * NOTE: return value 1 means we should stop walking down.
8072  */
8073 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8074                                  struct btrfs_root *root,
8075                                  struct btrfs_path *path,
8076                                  struct walk_control *wc, int *lookup_info)
8077 {
8078         u64 bytenr;
8079         u64 generation;
8080         u64 parent;
8081         u32 blocksize;
8082         struct btrfs_key key;
8083         struct extent_buffer *next;
8084         int level = wc->level;
8085         int reada = 0;
8086         int ret = 0;
8087         bool need_account = false;
8088
8089         generation = btrfs_node_ptr_generation(path->nodes[level],
8090                                                path->slots[level]);
8091         /*
8092          * if the lower level block was created before the snapshot
8093          * was created, we know there is no need to update back refs
8094          * for the subtree
8095          */
8096         if (wc->stage == UPDATE_BACKREF &&
8097             generation <= root->root_key.offset) {
8098                 *lookup_info = 1;
8099                 return 1;
8100         }
8101
8102         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8103         blocksize = root->nodesize;
8104
8105         next = btrfs_find_tree_block(root->fs_info, bytenr);
8106         if (!next) {
8107                 next = btrfs_find_create_tree_block(root, bytenr);
8108                 if (!next)
8109                         return -ENOMEM;
8110                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8111                                                level - 1);
8112                 reada = 1;
8113         }
8114         btrfs_tree_lock(next);
8115         btrfs_set_lock_blocking(next);
8116
8117         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8118                                        &wc->refs[level - 1],
8119                                        &wc->flags[level - 1]);
8120         if (ret < 0) {
8121                 btrfs_tree_unlock(next);
8122                 return ret;
8123         }
8124
8125         if (unlikely(wc->refs[level - 1] == 0)) {
8126                 btrfs_err(root->fs_info, "Missing references.");
8127                 BUG();
8128         }
8129         *lookup_info = 0;
8130
8131         if (wc->stage == DROP_REFERENCE) {
8132                 if (wc->refs[level - 1] > 1) {
8133                         need_account = true;
8134                         if (level == 1 &&
8135                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8136                                 goto skip;
8137
8138                         if (!wc->update_ref ||
8139                             generation <= root->root_key.offset)
8140                                 goto skip;
8141
8142                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8143                                               path->slots[level]);
8144                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8145                         if (ret < 0)
8146                                 goto skip;
8147
8148                         wc->stage = UPDATE_BACKREF;
8149                         wc->shared_level = level - 1;
8150                 }
8151         } else {
8152                 if (level == 1 &&
8153                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8154                         goto skip;
8155         }
8156
8157         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8158                 btrfs_tree_unlock(next);
8159                 free_extent_buffer(next);
8160                 next = NULL;
8161                 *lookup_info = 1;
8162         }
8163
8164         if (!next) {
8165                 if (reada && level == 1)
8166                         reada_walk_down(trans, root, wc, path);
8167                 next = read_tree_block(root, bytenr, generation);
8168                 if (IS_ERR(next)) {
8169                         return PTR_ERR(next);
8170                 } else if (!extent_buffer_uptodate(next)) {
8171                         free_extent_buffer(next);
8172                         return -EIO;
8173                 }
8174                 btrfs_tree_lock(next);
8175                 btrfs_set_lock_blocking(next);
8176         }
8177
8178         level--;
8179         BUG_ON(level != btrfs_header_level(next));
8180         path->nodes[level] = next;
8181         path->slots[level] = 0;
8182         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8183         wc->level = level;
8184         if (wc->level == 1)
8185                 wc->reada_slot = 0;
8186         return 0;
8187 skip:
8188         wc->refs[level - 1] = 0;
8189         wc->flags[level - 1] = 0;
8190         if (wc->stage == DROP_REFERENCE) {
8191                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8192                         parent = path->nodes[level]->start;
8193                 } else {
8194                         BUG_ON(root->root_key.objectid !=
8195                                btrfs_header_owner(path->nodes[level]));
8196                         parent = 0;
8197                 }
8198
8199                 if (need_account) {
8200                         ret = account_shared_subtree(trans, root, next,
8201                                                      generation, level - 1);
8202                         if (ret) {
8203                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8204                                         "%d accounting shared subtree. Quota "
8205                                         "is out of sync, rescan required.\n",
8206                                         root->fs_info->sb->s_id, ret);
8207                         }
8208                 }
8209                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8210                                 root->root_key.objectid, level - 1, 0, 0);
8211                 BUG_ON(ret); /* -ENOMEM */
8212         }
8213         btrfs_tree_unlock(next);
8214         free_extent_buffer(next);
8215         *lookup_info = 1;
8216         return 1;
8217 }
8218
8219 /*
8220  * helper to process tree block while walking up the tree.
8221  *
8222  * when wc->stage == DROP_REFERENCE, this function drops
8223  * reference count on the block.
8224  *
8225  * when wc->stage == UPDATE_BACKREF, this function changes
8226  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8227  * to UPDATE_BACKREF previously while processing the block.
8228  *
8229  * NOTE: return value 1 means we should stop walking up.
8230  */
8231 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8232                                  struct btrfs_root *root,
8233                                  struct btrfs_path *path,
8234                                  struct walk_control *wc)
8235 {
8236         int ret;
8237         int level = wc->level;
8238         struct extent_buffer *eb = path->nodes[level];
8239         u64 parent = 0;
8240
8241         if (wc->stage == UPDATE_BACKREF) {
8242                 BUG_ON(wc->shared_level < level);
8243                 if (level < wc->shared_level)
8244                         goto out;
8245
8246                 ret = find_next_key(path, level + 1, &wc->update_progress);
8247                 if (ret > 0)
8248                         wc->update_ref = 0;
8249
8250                 wc->stage = DROP_REFERENCE;
8251                 wc->shared_level = -1;
8252                 path->slots[level] = 0;
8253
8254                 /*
8255                  * check reference count again if the block isn't locked.
8256                  * we should start walking down the tree again if reference
8257                  * count is one.
8258                  */
8259                 if (!path->locks[level]) {
8260                         BUG_ON(level == 0);
8261                         btrfs_tree_lock(eb);
8262                         btrfs_set_lock_blocking(eb);
8263                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8264
8265                         ret = btrfs_lookup_extent_info(trans, root,
8266                                                        eb->start, level, 1,
8267                                                        &wc->refs[level],
8268                                                        &wc->flags[level]);
8269                         if (ret < 0) {
8270                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8271                                 path->locks[level] = 0;
8272                                 return ret;
8273                         }
8274                         BUG_ON(wc->refs[level] == 0);
8275                         if (wc->refs[level] == 1) {
8276                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8277                                 path->locks[level] = 0;
8278                                 return 1;
8279                         }
8280                 }
8281         }
8282
8283         /* wc->stage == DROP_REFERENCE */
8284         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8285
8286         if (wc->refs[level] == 1) {
8287                 if (level == 0) {
8288                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8289                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8290                         else
8291                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8292                         BUG_ON(ret); /* -ENOMEM */
8293                         ret = account_leaf_items(trans, root, eb);
8294                         if (ret) {
8295                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8296                                         "%d accounting leaf items. Quota "
8297                                         "is out of sync, rescan required.\n",
8298                                         root->fs_info->sb->s_id, ret);
8299                         }
8300                 }
8301                 /* make block locked assertion in clean_tree_block happy */
8302                 if (!path->locks[level] &&
8303                     btrfs_header_generation(eb) == trans->transid) {
8304                         btrfs_tree_lock(eb);
8305                         btrfs_set_lock_blocking(eb);
8306                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8307                 }
8308                 clean_tree_block(trans, root->fs_info, eb);
8309         }
8310
8311         if (eb == root->node) {
8312                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8313                         parent = eb->start;
8314                 else
8315                         BUG_ON(root->root_key.objectid !=
8316                                btrfs_header_owner(eb));
8317         } else {
8318                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8319                         parent = path->nodes[level + 1]->start;
8320                 else
8321                         BUG_ON(root->root_key.objectid !=
8322                                btrfs_header_owner(path->nodes[level + 1]));
8323         }
8324
8325         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8326 out:
8327         wc->refs[level] = 0;
8328         wc->flags[level] = 0;
8329         return 0;
8330 }
8331
8332 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8333                                    struct btrfs_root *root,
8334                                    struct btrfs_path *path,
8335                                    struct walk_control *wc)
8336 {
8337         int level = wc->level;
8338         int lookup_info = 1;
8339         int ret;
8340
8341         while (level >= 0) {
8342                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8343                 if (ret > 0)
8344                         break;
8345
8346                 if (level == 0)
8347                         break;
8348
8349                 if (path->slots[level] >=
8350                     btrfs_header_nritems(path->nodes[level]))
8351                         break;
8352
8353                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8354                 if (ret > 0) {
8355                         path->slots[level]++;
8356                         continue;
8357                 } else if (ret < 0)
8358                         return ret;
8359                 level = wc->level;
8360         }
8361         return 0;
8362 }
8363
8364 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8365                                  struct btrfs_root *root,
8366                                  struct btrfs_path *path,
8367                                  struct walk_control *wc, int max_level)
8368 {
8369         int level = wc->level;
8370         int ret;
8371
8372         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8373         while (level < max_level && path->nodes[level]) {
8374                 wc->level = level;
8375                 if (path->slots[level] + 1 <
8376                     btrfs_header_nritems(path->nodes[level])) {
8377                         path->slots[level]++;
8378                         return 0;
8379                 } else {
8380                         ret = walk_up_proc(trans, root, path, wc);
8381                         if (ret > 0)
8382                                 return 0;
8383
8384                         if (path->locks[level]) {
8385                                 btrfs_tree_unlock_rw(path->nodes[level],
8386                                                      path->locks[level]);
8387                                 path->locks[level] = 0;
8388                         }
8389                         free_extent_buffer(path->nodes[level]);
8390                         path->nodes[level] = NULL;
8391                         level++;
8392                 }
8393         }
8394         return 1;
8395 }
8396
8397 /*
8398  * drop a subvolume tree.
8399  *
8400  * this function traverses the tree freeing any blocks that only
8401  * referenced by the tree.
8402  *
8403  * when a shared tree block is found. this function decreases its
8404  * reference count by one. if update_ref is true, this function
8405  * also make sure backrefs for the shared block and all lower level
8406  * blocks are properly updated.
8407  *
8408  * If called with for_reloc == 0, may exit early with -EAGAIN
8409  */
8410 int btrfs_drop_snapshot(struct btrfs_root *root,
8411                          struct btrfs_block_rsv *block_rsv, int update_ref,
8412                          int for_reloc)
8413 {
8414         struct btrfs_path *path;
8415         struct btrfs_trans_handle *trans;
8416         struct btrfs_root *tree_root = root->fs_info->tree_root;
8417         struct btrfs_root_item *root_item = &root->root_item;
8418         struct walk_control *wc;
8419         struct btrfs_key key;
8420         int err = 0;
8421         int ret;
8422         int level;
8423         bool root_dropped = false;
8424
8425         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8426
8427         path = btrfs_alloc_path();
8428         if (!path) {
8429                 err = -ENOMEM;
8430                 goto out;
8431         }
8432
8433         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8434         if (!wc) {
8435                 btrfs_free_path(path);
8436                 err = -ENOMEM;
8437                 goto out;
8438         }
8439
8440         trans = btrfs_start_transaction(tree_root, 0);
8441         if (IS_ERR(trans)) {
8442                 err = PTR_ERR(trans);
8443                 goto out_free;
8444         }
8445
8446         if (block_rsv)
8447                 trans->block_rsv = block_rsv;
8448
8449         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8450                 level = btrfs_header_level(root->node);
8451                 path->nodes[level] = btrfs_lock_root_node(root);
8452                 btrfs_set_lock_blocking(path->nodes[level]);
8453                 path->slots[level] = 0;
8454                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8455                 memset(&wc->update_progress, 0,
8456                        sizeof(wc->update_progress));
8457         } else {
8458                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8459                 memcpy(&wc->update_progress, &key,
8460                        sizeof(wc->update_progress));
8461
8462                 level = root_item->drop_level;
8463                 BUG_ON(level == 0);
8464                 path->lowest_level = level;
8465                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8466                 path->lowest_level = 0;
8467                 if (ret < 0) {
8468                         err = ret;
8469                         goto out_end_trans;
8470                 }
8471                 WARN_ON(ret > 0);
8472
8473                 /*
8474                  * unlock our path, this is safe because only this
8475                  * function is allowed to delete this snapshot
8476                  */
8477                 btrfs_unlock_up_safe(path, 0);
8478
8479                 level = btrfs_header_level(root->node);
8480                 while (1) {
8481                         btrfs_tree_lock(path->nodes[level]);
8482                         btrfs_set_lock_blocking(path->nodes[level]);
8483                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8484
8485                         ret = btrfs_lookup_extent_info(trans, root,
8486                                                 path->nodes[level]->start,
8487                                                 level, 1, &wc->refs[level],
8488                                                 &wc->flags[level]);
8489                         if (ret < 0) {
8490                                 err = ret;
8491                                 goto out_end_trans;
8492                         }
8493                         BUG_ON(wc->refs[level] == 0);
8494
8495                         if (level == root_item->drop_level)
8496                                 break;
8497
8498                         btrfs_tree_unlock(path->nodes[level]);
8499                         path->locks[level] = 0;
8500                         WARN_ON(wc->refs[level] != 1);
8501                         level--;
8502                 }
8503         }
8504
8505         wc->level = level;
8506         wc->shared_level = -1;
8507         wc->stage = DROP_REFERENCE;
8508         wc->update_ref = update_ref;
8509         wc->keep_locks = 0;
8510         wc->for_reloc = for_reloc;
8511         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8512
8513         while (1) {
8514
8515                 ret = walk_down_tree(trans, root, path, wc);
8516                 if (ret < 0) {
8517                         err = ret;
8518                         break;
8519                 }
8520
8521                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8522                 if (ret < 0) {
8523                         err = ret;
8524                         break;
8525                 }
8526
8527                 if (ret > 0) {
8528                         BUG_ON(wc->stage != DROP_REFERENCE);
8529                         break;
8530                 }
8531
8532                 if (wc->stage == DROP_REFERENCE) {
8533                         level = wc->level;
8534                         btrfs_node_key(path->nodes[level],
8535                                        &root_item->drop_progress,
8536                                        path->slots[level]);
8537                         root_item->drop_level = level;
8538                 }
8539
8540                 BUG_ON(wc->level == 0);
8541                 if (btrfs_should_end_transaction(trans, tree_root) ||
8542                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8543                         ret = btrfs_update_root(trans, tree_root,
8544                                                 &root->root_key,
8545                                                 root_item);
8546                         if (ret) {
8547                                 btrfs_abort_transaction(trans, tree_root, ret);
8548                                 err = ret;
8549                                 goto out_end_trans;
8550                         }
8551
8552                         btrfs_end_transaction_throttle(trans, tree_root);
8553                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8554                                 pr_debug("BTRFS: drop snapshot early exit\n");
8555                                 err = -EAGAIN;
8556                                 goto out_free;
8557                         }
8558
8559                         trans = btrfs_start_transaction(tree_root, 0);
8560                         if (IS_ERR(trans)) {
8561                                 err = PTR_ERR(trans);
8562                                 goto out_free;
8563                         }
8564                         if (block_rsv)
8565                                 trans->block_rsv = block_rsv;
8566                 }
8567         }
8568         btrfs_release_path(path);
8569         if (err)
8570                 goto out_end_trans;
8571
8572         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8573         if (ret) {
8574                 btrfs_abort_transaction(trans, tree_root, ret);
8575                 goto out_end_trans;
8576         }
8577
8578         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8579                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8580                                       NULL, NULL);
8581                 if (ret < 0) {
8582                         btrfs_abort_transaction(trans, tree_root, ret);
8583                         err = ret;
8584                         goto out_end_trans;
8585                 } else if (ret > 0) {
8586                         /* if we fail to delete the orphan item this time
8587                          * around, it'll get picked up the next time.
8588                          *
8589                          * The most common failure here is just -ENOENT.
8590                          */
8591                         btrfs_del_orphan_item(trans, tree_root,
8592                                               root->root_key.objectid);
8593                 }
8594         }
8595
8596         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8597                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8598         } else {
8599                 free_extent_buffer(root->node);
8600                 free_extent_buffer(root->commit_root);
8601                 btrfs_put_fs_root(root);
8602         }
8603         root_dropped = true;
8604 out_end_trans:
8605         btrfs_end_transaction_throttle(trans, tree_root);
8606 out_free:
8607         kfree(wc);
8608         btrfs_free_path(path);
8609 out:
8610         /*
8611          * So if we need to stop dropping the snapshot for whatever reason we
8612          * need to make sure to add it back to the dead root list so that we
8613          * keep trying to do the work later.  This also cleans up roots if we
8614          * don't have it in the radix (like when we recover after a power fail
8615          * or unmount) so we don't leak memory.
8616          */
8617         if (!for_reloc && root_dropped == false)
8618                 btrfs_add_dead_root(root);
8619         if (err && err != -EAGAIN)
8620                 btrfs_std_error(root->fs_info, err);
8621         return err;
8622 }
8623
8624 /*
8625  * drop subtree rooted at tree block 'node'.
8626  *
8627  * NOTE: this function will unlock and release tree block 'node'
8628  * only used by relocation code
8629  */
8630 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8631                         struct btrfs_root *root,
8632                         struct extent_buffer *node,
8633                         struct extent_buffer *parent)
8634 {
8635         struct btrfs_path *path;
8636         struct walk_control *wc;
8637         int level;
8638         int parent_level;
8639         int ret = 0;
8640         int wret;
8641
8642         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8643
8644         path = btrfs_alloc_path();
8645         if (!path)
8646                 return -ENOMEM;
8647
8648         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8649         if (!wc) {
8650                 btrfs_free_path(path);
8651                 return -ENOMEM;
8652         }
8653
8654         btrfs_assert_tree_locked(parent);
8655         parent_level = btrfs_header_level(parent);
8656         extent_buffer_get(parent);
8657         path->nodes[parent_level] = parent;
8658         path->slots[parent_level] = btrfs_header_nritems(parent);
8659
8660         btrfs_assert_tree_locked(node);
8661         level = btrfs_header_level(node);
8662         path->nodes[level] = node;
8663         path->slots[level] = 0;
8664         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8665
8666         wc->refs[parent_level] = 1;
8667         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8668         wc->level = level;
8669         wc->shared_level = -1;
8670         wc->stage = DROP_REFERENCE;
8671         wc->update_ref = 0;
8672         wc->keep_locks = 1;
8673         wc->for_reloc = 1;
8674         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8675
8676         while (1) {
8677                 wret = walk_down_tree(trans, root, path, wc);
8678                 if (wret < 0) {
8679                         ret = wret;
8680                         break;
8681                 }
8682
8683                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8684                 if (wret < 0)
8685                         ret = wret;
8686                 if (wret != 0)
8687                         break;
8688         }
8689
8690         kfree(wc);
8691         btrfs_free_path(path);
8692         return ret;
8693 }
8694
8695 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8696 {
8697         u64 num_devices;
8698         u64 stripped;
8699
8700         /*
8701          * if restripe for this chunk_type is on pick target profile and
8702          * return, otherwise do the usual balance
8703          */
8704         stripped = get_restripe_target(root->fs_info, flags);
8705         if (stripped)
8706                 return extended_to_chunk(stripped);
8707
8708         num_devices = root->fs_info->fs_devices->rw_devices;
8709
8710         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8711                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8712                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8713
8714         if (num_devices == 1) {
8715                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8716                 stripped = flags & ~stripped;
8717
8718                 /* turn raid0 into single device chunks */
8719                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8720                         return stripped;
8721
8722                 /* turn mirroring into duplication */
8723                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8724                              BTRFS_BLOCK_GROUP_RAID10))
8725                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8726         } else {
8727                 /* they already had raid on here, just return */
8728                 if (flags & stripped)
8729                         return flags;
8730
8731                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8732                 stripped = flags & ~stripped;
8733
8734                 /* switch duplicated blocks with raid1 */
8735                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8736                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8737
8738                 /* this is drive concat, leave it alone */
8739         }
8740
8741         return flags;
8742 }
8743
8744 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8745 {
8746         struct btrfs_space_info *sinfo = cache->space_info;
8747         u64 num_bytes;
8748         u64 min_allocable_bytes;
8749         int ret = -ENOSPC;
8750
8751
8752         /*
8753          * We need some metadata space and system metadata space for
8754          * allocating chunks in some corner cases until we force to set
8755          * it to be readonly.
8756          */
8757         if ((sinfo->flags &
8758              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8759             !force)
8760                 min_allocable_bytes = 1 * 1024 * 1024;
8761         else
8762                 min_allocable_bytes = 0;
8763
8764         spin_lock(&sinfo->lock);
8765         spin_lock(&cache->lock);
8766
8767         if (cache->ro) {
8768                 ret = 0;
8769                 goto out;
8770         }
8771
8772         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8773                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8774
8775         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8776             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8777             min_allocable_bytes <= sinfo->total_bytes) {
8778                 sinfo->bytes_readonly += num_bytes;
8779                 cache->ro = 1;
8780                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8781                 ret = 0;
8782         }
8783 out:
8784         spin_unlock(&cache->lock);
8785         spin_unlock(&sinfo->lock);
8786         return ret;
8787 }
8788
8789 int btrfs_set_block_group_ro(struct btrfs_root *root,
8790                              struct btrfs_block_group_cache *cache)
8791
8792 {
8793         struct btrfs_trans_handle *trans;
8794         u64 alloc_flags;
8795         int ret;
8796
8797         BUG_ON(cache->ro);
8798
8799 again:
8800         trans = btrfs_join_transaction(root);
8801         if (IS_ERR(trans))
8802                 return PTR_ERR(trans);
8803
8804         /*
8805          * we're not allowed to set block groups readonly after the dirty
8806          * block groups cache has started writing.  If it already started,
8807          * back off and let this transaction commit
8808          */
8809         mutex_lock(&root->fs_info->ro_block_group_mutex);
8810         if (trans->transaction->dirty_bg_run) {
8811                 u64 transid = trans->transid;
8812
8813                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8814                 btrfs_end_transaction(trans, root);
8815
8816                 ret = btrfs_wait_for_commit(root, transid);
8817                 if (ret)
8818                         return ret;
8819                 goto again;
8820         }
8821
8822         /*
8823          * if we are changing raid levels, try to allocate a corresponding
8824          * block group with the new raid level.
8825          */
8826         alloc_flags = update_block_group_flags(root, cache->flags);
8827         if (alloc_flags != cache->flags) {
8828                 ret = do_chunk_alloc(trans, root, alloc_flags,
8829                                      CHUNK_ALLOC_FORCE);
8830                 /*
8831                  * ENOSPC is allowed here, we may have enough space
8832                  * already allocated at the new raid level to
8833                  * carry on
8834                  */
8835                 if (ret == -ENOSPC)
8836                         ret = 0;
8837                 if (ret < 0)
8838                         goto out;
8839         }
8840
8841         ret = set_block_group_ro(cache, 0);
8842         if (!ret)
8843                 goto out;
8844         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8845         ret = do_chunk_alloc(trans, root, alloc_flags,
8846                              CHUNK_ALLOC_FORCE);
8847         if (ret < 0)
8848                 goto out;
8849         ret = set_block_group_ro(cache, 0);
8850 out:
8851         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8852                 alloc_flags = update_block_group_flags(root, cache->flags);
8853                 lock_chunks(root->fs_info->chunk_root);
8854                 check_system_chunk(trans, root, alloc_flags);
8855                 unlock_chunks(root->fs_info->chunk_root);
8856         }
8857         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8858
8859         btrfs_end_transaction(trans, root);
8860         return ret;
8861 }
8862
8863 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8864                             struct btrfs_root *root, u64 type)
8865 {
8866         u64 alloc_flags = get_alloc_profile(root, type);
8867         return do_chunk_alloc(trans, root, alloc_flags,
8868                               CHUNK_ALLOC_FORCE);
8869 }
8870
8871 /*
8872  * helper to account the unused space of all the readonly block group in the
8873  * space_info. takes mirrors into account.
8874  */
8875 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8876 {
8877         struct btrfs_block_group_cache *block_group;
8878         u64 free_bytes = 0;
8879         int factor;
8880
8881         /* It's df, we don't care if it's racey */
8882         if (list_empty(&sinfo->ro_bgs))
8883                 return 0;
8884
8885         spin_lock(&sinfo->lock);
8886         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8887                 spin_lock(&block_group->lock);
8888
8889                 if (!block_group->ro) {
8890                         spin_unlock(&block_group->lock);
8891                         continue;
8892                 }
8893
8894                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8895                                           BTRFS_BLOCK_GROUP_RAID10 |
8896                                           BTRFS_BLOCK_GROUP_DUP))
8897                         factor = 2;
8898                 else
8899                         factor = 1;
8900
8901                 free_bytes += (block_group->key.offset -
8902                                btrfs_block_group_used(&block_group->item)) *
8903                                factor;
8904
8905                 spin_unlock(&block_group->lock);
8906         }
8907         spin_unlock(&sinfo->lock);
8908
8909         return free_bytes;
8910 }
8911
8912 void btrfs_set_block_group_rw(struct btrfs_root *root,
8913                               struct btrfs_block_group_cache *cache)
8914 {
8915         struct btrfs_space_info *sinfo = cache->space_info;
8916         u64 num_bytes;
8917
8918         BUG_ON(!cache->ro);
8919
8920         spin_lock(&sinfo->lock);
8921         spin_lock(&cache->lock);
8922         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8923                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8924         sinfo->bytes_readonly -= num_bytes;
8925         cache->ro = 0;
8926         list_del_init(&cache->ro_list);
8927         spin_unlock(&cache->lock);
8928         spin_unlock(&sinfo->lock);
8929 }
8930
8931 /*
8932  * checks to see if its even possible to relocate this block group.
8933  *
8934  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
8935  * ok to go ahead and try.
8936  */
8937 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8938 {
8939         struct btrfs_block_group_cache *block_group;
8940         struct btrfs_space_info *space_info;
8941         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8942         struct btrfs_device *device;
8943         struct btrfs_trans_handle *trans;
8944         u64 min_free;
8945         u64 dev_min = 1;
8946         u64 dev_nr = 0;
8947         u64 target;
8948         int index;
8949         int full = 0;
8950         int ret = 0;
8951
8952         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8953
8954         /* odd, couldn't find the block group, leave it alone */
8955         if (!block_group)
8956                 return -1;
8957
8958         min_free = btrfs_block_group_used(&block_group->item);
8959
8960         /* no bytes used, we're good */
8961         if (!min_free)
8962                 goto out;
8963
8964         space_info = block_group->space_info;
8965         spin_lock(&space_info->lock);
8966
8967         full = space_info->full;
8968
8969         /*
8970          * if this is the last block group we have in this space, we can't
8971          * relocate it unless we're able to allocate a new chunk below.
8972          *
8973          * Otherwise, we need to make sure we have room in the space to handle
8974          * all of the extents from this block group.  If we can, we're good
8975          */
8976         if ((space_info->total_bytes != block_group->key.offset) &&
8977             (space_info->bytes_used + space_info->bytes_reserved +
8978              space_info->bytes_pinned + space_info->bytes_readonly +
8979              min_free < space_info->total_bytes)) {
8980                 spin_unlock(&space_info->lock);
8981                 goto out;
8982         }
8983         spin_unlock(&space_info->lock);
8984
8985         /*
8986          * ok we don't have enough space, but maybe we have free space on our
8987          * devices to allocate new chunks for relocation, so loop through our
8988          * alloc devices and guess if we have enough space.  if this block
8989          * group is going to be restriped, run checks against the target
8990          * profile instead of the current one.
8991          */
8992         ret = -1;
8993
8994         /*
8995          * index:
8996          *      0: raid10
8997          *      1: raid1
8998          *      2: dup
8999          *      3: raid0
9000          *      4: single
9001          */
9002         target = get_restripe_target(root->fs_info, block_group->flags);
9003         if (target) {
9004                 index = __get_raid_index(extended_to_chunk(target));
9005         } else {
9006                 /*
9007                  * this is just a balance, so if we were marked as full
9008                  * we know there is no space for a new chunk
9009                  */
9010                 if (full)
9011                         goto out;
9012
9013                 index = get_block_group_index(block_group);
9014         }
9015
9016         if (index == BTRFS_RAID_RAID10) {
9017                 dev_min = 4;
9018                 /* Divide by 2 */
9019                 min_free >>= 1;
9020         } else if (index == BTRFS_RAID_RAID1) {
9021                 dev_min = 2;
9022         } else if (index == BTRFS_RAID_DUP) {
9023                 /* Multiply by 2 */
9024                 min_free <<= 1;
9025         } else if (index == BTRFS_RAID_RAID0) {
9026                 dev_min = fs_devices->rw_devices;
9027                 min_free = div64_u64(min_free, dev_min);
9028         }
9029
9030         /* We need to do this so that we can look at pending chunks */
9031         trans = btrfs_join_transaction(root);
9032         if (IS_ERR(trans)) {
9033                 ret = PTR_ERR(trans);
9034                 goto out;
9035         }
9036
9037         mutex_lock(&root->fs_info->chunk_mutex);
9038         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9039                 u64 dev_offset;
9040
9041                 /*
9042                  * check to make sure we can actually find a chunk with enough
9043                  * space to fit our block group in.
9044                  */
9045                 if (device->total_bytes > device->bytes_used + min_free &&
9046                     !device->is_tgtdev_for_dev_replace) {
9047                         ret = find_free_dev_extent(trans, device, min_free,
9048                                                    &dev_offset, NULL);
9049                         if (!ret)
9050                                 dev_nr++;
9051
9052                         if (dev_nr >= dev_min)
9053                                 break;
9054
9055                         ret = -1;
9056                 }
9057         }
9058         mutex_unlock(&root->fs_info->chunk_mutex);
9059         btrfs_end_transaction(trans, root);
9060 out:
9061         btrfs_put_block_group(block_group);
9062         return ret;
9063 }
9064
9065 static int find_first_block_group(struct btrfs_root *root,
9066                 struct btrfs_path *path, struct btrfs_key *key)
9067 {
9068         int ret = 0;
9069         struct btrfs_key found_key;
9070         struct extent_buffer *leaf;
9071         int slot;
9072
9073         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9074         if (ret < 0)
9075                 goto out;
9076
9077         while (1) {
9078                 slot = path->slots[0];
9079                 leaf = path->nodes[0];
9080                 if (slot >= btrfs_header_nritems(leaf)) {
9081                         ret = btrfs_next_leaf(root, path);
9082                         if (ret == 0)
9083                                 continue;
9084                         if (ret < 0)
9085                                 goto out;
9086                         break;
9087                 }
9088                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9089
9090                 if (found_key.objectid >= key->objectid &&
9091                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9092                         ret = 0;
9093                         goto out;
9094                 }
9095                 path->slots[0]++;
9096         }
9097 out:
9098         return ret;
9099 }
9100
9101 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9102 {
9103         struct btrfs_block_group_cache *block_group;
9104         u64 last = 0;
9105
9106         while (1) {
9107                 struct inode *inode;
9108
9109                 block_group = btrfs_lookup_first_block_group(info, last);
9110                 while (block_group) {
9111                         spin_lock(&block_group->lock);
9112                         if (block_group->iref)
9113                                 break;
9114                         spin_unlock(&block_group->lock);
9115                         block_group = next_block_group(info->tree_root,
9116                                                        block_group);
9117                 }
9118                 if (!block_group) {
9119                         if (last == 0)
9120                                 break;
9121                         last = 0;
9122                         continue;
9123                 }
9124
9125                 inode = block_group->inode;
9126                 block_group->iref = 0;
9127                 block_group->inode = NULL;
9128                 spin_unlock(&block_group->lock);
9129                 iput(inode);
9130                 last = block_group->key.objectid + block_group->key.offset;
9131                 btrfs_put_block_group(block_group);
9132         }
9133 }
9134
9135 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9136 {
9137         struct btrfs_block_group_cache *block_group;
9138         struct btrfs_space_info *space_info;
9139         struct btrfs_caching_control *caching_ctl;
9140         struct rb_node *n;
9141
9142         down_write(&info->commit_root_sem);
9143         while (!list_empty(&info->caching_block_groups)) {
9144                 caching_ctl = list_entry(info->caching_block_groups.next,
9145                                          struct btrfs_caching_control, list);
9146                 list_del(&caching_ctl->list);
9147                 put_caching_control(caching_ctl);
9148         }
9149         up_write(&info->commit_root_sem);
9150
9151         spin_lock(&info->unused_bgs_lock);
9152         while (!list_empty(&info->unused_bgs)) {
9153                 block_group = list_first_entry(&info->unused_bgs,
9154                                                struct btrfs_block_group_cache,
9155                                                bg_list);
9156                 list_del_init(&block_group->bg_list);
9157                 btrfs_put_block_group(block_group);
9158         }
9159         spin_unlock(&info->unused_bgs_lock);
9160
9161         spin_lock(&info->block_group_cache_lock);
9162         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9163                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9164                                        cache_node);
9165                 rb_erase(&block_group->cache_node,
9166                          &info->block_group_cache_tree);
9167                 RB_CLEAR_NODE(&block_group->cache_node);
9168                 spin_unlock(&info->block_group_cache_lock);
9169
9170                 down_write(&block_group->space_info->groups_sem);
9171                 list_del(&block_group->list);
9172                 up_write(&block_group->space_info->groups_sem);
9173
9174                 if (block_group->cached == BTRFS_CACHE_STARTED)
9175                         wait_block_group_cache_done(block_group);
9176
9177                 /*
9178                  * We haven't cached this block group, which means we could
9179                  * possibly have excluded extents on this block group.
9180                  */
9181                 if (block_group->cached == BTRFS_CACHE_NO ||
9182                     block_group->cached == BTRFS_CACHE_ERROR)
9183                         free_excluded_extents(info->extent_root, block_group);
9184
9185                 btrfs_remove_free_space_cache(block_group);
9186                 btrfs_put_block_group(block_group);
9187
9188                 spin_lock(&info->block_group_cache_lock);
9189         }
9190         spin_unlock(&info->block_group_cache_lock);
9191
9192         /* now that all the block groups are freed, go through and
9193          * free all the space_info structs.  This is only called during
9194          * the final stages of unmount, and so we know nobody is
9195          * using them.  We call synchronize_rcu() once before we start,
9196          * just to be on the safe side.
9197          */
9198         synchronize_rcu();
9199
9200         release_global_block_rsv(info);
9201
9202         while (!list_empty(&info->space_info)) {
9203                 int i;
9204
9205                 space_info = list_entry(info->space_info.next,
9206                                         struct btrfs_space_info,
9207                                         list);
9208                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9209                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9210                             space_info->bytes_reserved > 0 ||
9211                             space_info->bytes_may_use > 0)) {
9212                                 dump_space_info(space_info, 0, 0);
9213                         }
9214                 }
9215                 list_del(&space_info->list);
9216                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9217                         struct kobject *kobj;
9218                         kobj = space_info->block_group_kobjs[i];
9219                         space_info->block_group_kobjs[i] = NULL;
9220                         if (kobj) {
9221                                 kobject_del(kobj);
9222                                 kobject_put(kobj);
9223                         }
9224                 }
9225                 kobject_del(&space_info->kobj);
9226                 kobject_put(&space_info->kobj);
9227         }
9228         return 0;
9229 }
9230
9231 static void __link_block_group(struct btrfs_space_info *space_info,
9232                                struct btrfs_block_group_cache *cache)
9233 {
9234         int index = get_block_group_index(cache);
9235         bool first = false;
9236
9237         down_write(&space_info->groups_sem);
9238         if (list_empty(&space_info->block_groups[index]))
9239                 first = true;
9240         list_add_tail(&cache->list, &space_info->block_groups[index]);
9241         up_write(&space_info->groups_sem);
9242
9243         if (first) {
9244                 struct raid_kobject *rkobj;
9245                 int ret;
9246
9247                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9248                 if (!rkobj)
9249                         goto out_err;
9250                 rkobj->raid_type = index;
9251                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9252                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9253                                   "%s", get_raid_name(index));
9254                 if (ret) {
9255                         kobject_put(&rkobj->kobj);
9256                         goto out_err;
9257                 }
9258                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9259         }
9260
9261         return;
9262 out_err:
9263         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9264 }
9265
9266 static struct btrfs_block_group_cache *
9267 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9268 {
9269         struct btrfs_block_group_cache *cache;
9270
9271         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9272         if (!cache)
9273                 return NULL;
9274
9275         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9276                                         GFP_NOFS);
9277         if (!cache->free_space_ctl) {
9278                 kfree(cache);
9279                 return NULL;
9280         }
9281
9282         cache->key.objectid = start;
9283         cache->key.offset = size;
9284         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9285
9286         cache->sectorsize = root->sectorsize;
9287         cache->fs_info = root->fs_info;
9288         cache->full_stripe_len = btrfs_full_stripe_len(root,
9289                                                &root->fs_info->mapping_tree,
9290                                                start);
9291         atomic_set(&cache->count, 1);
9292         spin_lock_init(&cache->lock);
9293         init_rwsem(&cache->data_rwsem);
9294         INIT_LIST_HEAD(&cache->list);
9295         INIT_LIST_HEAD(&cache->cluster_list);
9296         INIT_LIST_HEAD(&cache->bg_list);
9297         INIT_LIST_HEAD(&cache->ro_list);
9298         INIT_LIST_HEAD(&cache->dirty_list);
9299         INIT_LIST_HEAD(&cache->io_list);
9300         btrfs_init_free_space_ctl(cache);
9301         atomic_set(&cache->trimming, 0);
9302
9303         return cache;
9304 }
9305
9306 int btrfs_read_block_groups(struct btrfs_root *root)
9307 {
9308         struct btrfs_path *path;
9309         int ret;
9310         struct btrfs_block_group_cache *cache;
9311         struct btrfs_fs_info *info = root->fs_info;
9312         struct btrfs_space_info *space_info;
9313         struct btrfs_key key;
9314         struct btrfs_key found_key;
9315         struct extent_buffer *leaf;
9316         int need_clear = 0;
9317         u64 cache_gen;
9318
9319         root = info->extent_root;
9320         key.objectid = 0;
9321         key.offset = 0;
9322         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9323         path = btrfs_alloc_path();
9324         if (!path)
9325                 return -ENOMEM;
9326         path->reada = 1;
9327
9328         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9329         if (btrfs_test_opt(root, SPACE_CACHE) &&
9330             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9331                 need_clear = 1;
9332         if (btrfs_test_opt(root, CLEAR_CACHE))
9333                 need_clear = 1;
9334
9335         while (1) {
9336                 ret = find_first_block_group(root, path, &key);
9337                 if (ret > 0)
9338                         break;
9339                 if (ret != 0)
9340                         goto error;
9341
9342                 leaf = path->nodes[0];
9343                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9344
9345                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9346                                                        found_key.offset);
9347                 if (!cache) {
9348                         ret = -ENOMEM;
9349                         goto error;
9350                 }
9351
9352                 if (need_clear) {
9353                         /*
9354                          * When we mount with old space cache, we need to
9355                          * set BTRFS_DC_CLEAR and set dirty flag.
9356                          *
9357                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9358                          *    truncate the old free space cache inode and
9359                          *    setup a new one.
9360                          * b) Setting 'dirty flag' makes sure that we flush
9361                          *    the new space cache info onto disk.
9362                          */
9363                         if (btrfs_test_opt(root, SPACE_CACHE))
9364                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9365                 }
9366
9367                 read_extent_buffer(leaf, &cache->item,
9368                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9369                                    sizeof(cache->item));
9370                 cache->flags = btrfs_block_group_flags(&cache->item);
9371
9372                 key.objectid = found_key.objectid + found_key.offset;
9373                 btrfs_release_path(path);
9374
9375                 /*
9376                  * We need to exclude the super stripes now so that the space
9377                  * info has super bytes accounted for, otherwise we'll think
9378                  * we have more space than we actually do.
9379                  */
9380                 ret = exclude_super_stripes(root, cache);
9381                 if (ret) {
9382                         /*
9383                          * We may have excluded something, so call this just in
9384                          * case.
9385                          */
9386                         free_excluded_extents(root, cache);
9387                         btrfs_put_block_group(cache);
9388                         goto error;
9389                 }
9390
9391                 /*
9392                  * check for two cases, either we are full, and therefore
9393                  * don't need to bother with the caching work since we won't
9394                  * find any space, or we are empty, and we can just add all
9395                  * the space in and be done with it.  This saves us _alot_ of
9396                  * time, particularly in the full case.
9397                  */
9398                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9399                         cache->last_byte_to_unpin = (u64)-1;
9400                         cache->cached = BTRFS_CACHE_FINISHED;
9401                         free_excluded_extents(root, cache);
9402                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9403                         cache->last_byte_to_unpin = (u64)-1;
9404                         cache->cached = BTRFS_CACHE_FINISHED;
9405                         add_new_free_space(cache, root->fs_info,
9406                                            found_key.objectid,
9407                                            found_key.objectid +
9408                                            found_key.offset);
9409                         free_excluded_extents(root, cache);
9410                 }
9411
9412                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9413                 if (ret) {
9414                         btrfs_remove_free_space_cache(cache);
9415                         btrfs_put_block_group(cache);
9416                         goto error;
9417                 }
9418
9419                 ret = update_space_info(info, cache->flags, found_key.offset,
9420                                         btrfs_block_group_used(&cache->item),
9421                                         &space_info);
9422                 if (ret) {
9423                         btrfs_remove_free_space_cache(cache);
9424                         spin_lock(&info->block_group_cache_lock);
9425                         rb_erase(&cache->cache_node,
9426                                  &info->block_group_cache_tree);
9427                         RB_CLEAR_NODE(&cache->cache_node);
9428                         spin_unlock(&info->block_group_cache_lock);
9429                         btrfs_put_block_group(cache);
9430                         goto error;
9431                 }
9432
9433                 cache->space_info = space_info;
9434                 spin_lock(&cache->space_info->lock);
9435                 cache->space_info->bytes_readonly += cache->bytes_super;
9436                 spin_unlock(&cache->space_info->lock);
9437
9438                 __link_block_group(space_info, cache);
9439
9440                 set_avail_alloc_bits(root->fs_info, cache->flags);
9441                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9442                         set_block_group_ro(cache, 1);
9443                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9444                         spin_lock(&info->unused_bgs_lock);
9445                         /* Should always be true but just in case. */
9446                         if (list_empty(&cache->bg_list)) {
9447                                 btrfs_get_block_group(cache);
9448                                 list_add_tail(&cache->bg_list,
9449                                               &info->unused_bgs);
9450                         }
9451                         spin_unlock(&info->unused_bgs_lock);
9452                 }
9453         }
9454
9455         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9456                 if (!(get_alloc_profile(root, space_info->flags) &
9457                       (BTRFS_BLOCK_GROUP_RAID10 |
9458                        BTRFS_BLOCK_GROUP_RAID1 |
9459                        BTRFS_BLOCK_GROUP_RAID5 |
9460                        BTRFS_BLOCK_GROUP_RAID6 |
9461                        BTRFS_BLOCK_GROUP_DUP)))
9462                         continue;
9463                 /*
9464                  * avoid allocating from un-mirrored block group if there are
9465                  * mirrored block groups.
9466                  */
9467                 list_for_each_entry(cache,
9468                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9469                                 list)
9470                         set_block_group_ro(cache, 1);
9471                 list_for_each_entry(cache,
9472                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9473                                 list)
9474                         set_block_group_ro(cache, 1);
9475         }
9476
9477         init_global_block_rsv(info);
9478         ret = 0;
9479 error:
9480         btrfs_free_path(path);
9481         return ret;
9482 }
9483
9484 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9485                                        struct btrfs_root *root)
9486 {
9487         struct btrfs_block_group_cache *block_group, *tmp;
9488         struct btrfs_root *extent_root = root->fs_info->extent_root;
9489         struct btrfs_block_group_item item;
9490         struct btrfs_key key;
9491         int ret = 0;
9492
9493         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9494                 if (ret)
9495                         goto next;
9496
9497                 spin_lock(&block_group->lock);
9498                 memcpy(&item, &block_group->item, sizeof(item));
9499                 memcpy(&key, &block_group->key, sizeof(key));
9500                 spin_unlock(&block_group->lock);
9501
9502                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9503                                         sizeof(item));
9504                 if (ret)
9505                         btrfs_abort_transaction(trans, extent_root, ret);
9506                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9507                                                key.objectid, key.offset);
9508                 if (ret)
9509                         btrfs_abort_transaction(trans, extent_root, ret);
9510 next:
9511                 list_del_init(&block_group->bg_list);
9512         }
9513 }
9514
9515 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9516                            struct btrfs_root *root, u64 bytes_used,
9517                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9518                            u64 size)
9519 {
9520         int ret;
9521         struct btrfs_root *extent_root;
9522         struct btrfs_block_group_cache *cache;
9523
9524         extent_root = root->fs_info->extent_root;
9525
9526         btrfs_set_log_full_commit(root->fs_info, trans);
9527
9528         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9529         if (!cache)
9530                 return -ENOMEM;
9531
9532         btrfs_set_block_group_used(&cache->item, bytes_used);
9533         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9534         btrfs_set_block_group_flags(&cache->item, type);
9535
9536         cache->flags = type;
9537         cache->last_byte_to_unpin = (u64)-1;
9538         cache->cached = BTRFS_CACHE_FINISHED;
9539         ret = exclude_super_stripes(root, cache);
9540         if (ret) {
9541                 /*
9542                  * We may have excluded something, so call this just in
9543                  * case.
9544                  */
9545                 free_excluded_extents(root, cache);
9546                 btrfs_put_block_group(cache);
9547                 return ret;
9548         }
9549
9550         add_new_free_space(cache, root->fs_info, chunk_offset,
9551                            chunk_offset + size);
9552
9553         free_excluded_extents(root, cache);
9554
9555         /*
9556          * Call to ensure the corresponding space_info object is created and
9557          * assigned to our block group, but don't update its counters just yet.
9558          * We want our bg to be added to the rbtree with its ->space_info set.
9559          */
9560         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9561                                 &cache->space_info);
9562         if (ret) {
9563                 btrfs_remove_free_space_cache(cache);
9564                 btrfs_put_block_group(cache);
9565                 return ret;
9566         }
9567
9568         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9569         if (ret) {
9570                 btrfs_remove_free_space_cache(cache);
9571                 btrfs_put_block_group(cache);
9572                 return ret;
9573         }
9574
9575         /*
9576          * Now that our block group has its ->space_info set and is inserted in
9577          * the rbtree, update the space info's counters.
9578          */
9579         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9580                                 &cache->space_info);
9581         if (ret) {
9582                 btrfs_remove_free_space_cache(cache);
9583                 spin_lock(&root->fs_info->block_group_cache_lock);
9584                 rb_erase(&cache->cache_node,
9585                          &root->fs_info->block_group_cache_tree);
9586                 RB_CLEAR_NODE(&cache->cache_node);
9587                 spin_unlock(&root->fs_info->block_group_cache_lock);
9588                 btrfs_put_block_group(cache);
9589                 return ret;
9590         }
9591         update_global_block_rsv(root->fs_info);
9592
9593         spin_lock(&cache->space_info->lock);
9594         cache->space_info->bytes_readonly += cache->bytes_super;
9595         spin_unlock(&cache->space_info->lock);
9596
9597         __link_block_group(cache->space_info, cache);
9598
9599         list_add_tail(&cache->bg_list, &trans->new_bgs);
9600
9601         set_avail_alloc_bits(extent_root->fs_info, type);
9602
9603         return 0;
9604 }
9605
9606 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9607 {
9608         u64 extra_flags = chunk_to_extended(flags) &
9609                                 BTRFS_EXTENDED_PROFILE_MASK;
9610
9611         write_seqlock(&fs_info->profiles_lock);
9612         if (flags & BTRFS_BLOCK_GROUP_DATA)
9613                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9614         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9615                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9616         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9617                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9618         write_sequnlock(&fs_info->profiles_lock);
9619 }
9620
9621 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9622                              struct btrfs_root *root, u64 group_start,
9623                              struct extent_map *em)
9624 {
9625         struct btrfs_path *path;
9626         struct btrfs_block_group_cache *block_group;
9627         struct btrfs_free_cluster *cluster;
9628         struct btrfs_root *tree_root = root->fs_info->tree_root;
9629         struct btrfs_key key;
9630         struct inode *inode;
9631         struct kobject *kobj = NULL;
9632         int ret;
9633         int index;
9634         int factor;
9635         struct btrfs_caching_control *caching_ctl = NULL;
9636         bool remove_em;
9637
9638         root = root->fs_info->extent_root;
9639
9640         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9641         BUG_ON(!block_group);
9642         BUG_ON(!block_group->ro);
9643
9644         /*
9645          * Free the reserved super bytes from this block group before
9646          * remove it.
9647          */
9648         free_excluded_extents(root, block_group);
9649
9650         memcpy(&key, &block_group->key, sizeof(key));
9651         index = get_block_group_index(block_group);
9652         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9653                                   BTRFS_BLOCK_GROUP_RAID1 |
9654                                   BTRFS_BLOCK_GROUP_RAID10))
9655                 factor = 2;
9656         else
9657                 factor = 1;
9658
9659         /* make sure this block group isn't part of an allocation cluster */
9660         cluster = &root->fs_info->data_alloc_cluster;
9661         spin_lock(&cluster->refill_lock);
9662         btrfs_return_cluster_to_free_space(block_group, cluster);
9663         spin_unlock(&cluster->refill_lock);
9664
9665         /*
9666          * make sure this block group isn't part of a metadata
9667          * allocation cluster
9668          */
9669         cluster = &root->fs_info->meta_alloc_cluster;
9670         spin_lock(&cluster->refill_lock);
9671         btrfs_return_cluster_to_free_space(block_group, cluster);
9672         spin_unlock(&cluster->refill_lock);
9673
9674         path = btrfs_alloc_path();
9675         if (!path) {
9676                 ret = -ENOMEM;
9677                 goto out;
9678         }
9679
9680         /*
9681          * get the inode first so any iput calls done for the io_list
9682          * aren't the final iput (no unlinks allowed now)
9683          */
9684         inode = lookup_free_space_inode(tree_root, block_group, path);
9685
9686         mutex_lock(&trans->transaction->cache_write_mutex);
9687         /*
9688          * make sure our free spache cache IO is done before remove the
9689          * free space inode
9690          */
9691         spin_lock(&trans->transaction->dirty_bgs_lock);
9692         if (!list_empty(&block_group->io_list)) {
9693                 list_del_init(&block_group->io_list);
9694
9695                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9696
9697                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9698                 btrfs_wait_cache_io(root, trans, block_group,
9699                                     &block_group->io_ctl, path,
9700                                     block_group->key.objectid);
9701                 btrfs_put_block_group(block_group);
9702                 spin_lock(&trans->transaction->dirty_bgs_lock);
9703         }
9704
9705         if (!list_empty(&block_group->dirty_list)) {
9706                 list_del_init(&block_group->dirty_list);
9707                 btrfs_put_block_group(block_group);
9708         }
9709         spin_unlock(&trans->transaction->dirty_bgs_lock);
9710         mutex_unlock(&trans->transaction->cache_write_mutex);
9711
9712         if (!IS_ERR(inode)) {
9713                 ret = btrfs_orphan_add(trans, inode);
9714                 if (ret) {
9715                         btrfs_add_delayed_iput(inode);
9716                         goto out;
9717                 }
9718                 clear_nlink(inode);
9719                 /* One for the block groups ref */
9720                 spin_lock(&block_group->lock);
9721                 if (block_group->iref) {
9722                         block_group->iref = 0;
9723                         block_group->inode = NULL;
9724                         spin_unlock(&block_group->lock);
9725                         iput(inode);
9726                 } else {
9727                         spin_unlock(&block_group->lock);
9728                 }
9729                 /* One for our lookup ref */
9730                 btrfs_add_delayed_iput(inode);
9731         }
9732
9733         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9734         key.offset = block_group->key.objectid;
9735         key.type = 0;
9736
9737         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9738         if (ret < 0)
9739                 goto out;
9740         if (ret > 0)
9741                 btrfs_release_path(path);
9742         if (ret == 0) {
9743                 ret = btrfs_del_item(trans, tree_root, path);
9744                 if (ret)
9745                         goto out;
9746                 btrfs_release_path(path);
9747         }
9748
9749         spin_lock(&root->fs_info->block_group_cache_lock);
9750         rb_erase(&block_group->cache_node,
9751                  &root->fs_info->block_group_cache_tree);
9752         RB_CLEAR_NODE(&block_group->cache_node);
9753
9754         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9755                 root->fs_info->first_logical_byte = (u64)-1;
9756         spin_unlock(&root->fs_info->block_group_cache_lock);
9757
9758         down_write(&block_group->space_info->groups_sem);
9759         /*
9760          * we must use list_del_init so people can check to see if they
9761          * are still on the list after taking the semaphore
9762          */
9763         list_del_init(&block_group->list);
9764         if (list_empty(&block_group->space_info->block_groups[index])) {
9765                 kobj = block_group->space_info->block_group_kobjs[index];
9766                 block_group->space_info->block_group_kobjs[index] = NULL;
9767                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9768         }
9769         up_write(&block_group->space_info->groups_sem);
9770         if (kobj) {
9771                 kobject_del(kobj);
9772                 kobject_put(kobj);
9773         }
9774
9775         if (block_group->has_caching_ctl)
9776                 caching_ctl = get_caching_control(block_group);
9777         if (block_group->cached == BTRFS_CACHE_STARTED)
9778                 wait_block_group_cache_done(block_group);
9779         if (block_group->has_caching_ctl) {
9780                 down_write(&root->fs_info->commit_root_sem);
9781                 if (!caching_ctl) {
9782                         struct btrfs_caching_control *ctl;
9783
9784                         list_for_each_entry(ctl,
9785                                     &root->fs_info->caching_block_groups, list)
9786                                 if (ctl->block_group == block_group) {
9787                                         caching_ctl = ctl;
9788                                         atomic_inc(&caching_ctl->count);
9789                                         break;
9790                                 }
9791                 }
9792                 if (caching_ctl)
9793                         list_del_init(&caching_ctl->list);
9794                 up_write(&root->fs_info->commit_root_sem);
9795                 if (caching_ctl) {
9796                         /* Once for the caching bgs list and once for us. */
9797                         put_caching_control(caching_ctl);
9798                         put_caching_control(caching_ctl);
9799                 }
9800         }
9801
9802         spin_lock(&trans->transaction->dirty_bgs_lock);
9803         if (!list_empty(&block_group->dirty_list)) {
9804                 WARN_ON(1);
9805         }
9806         if (!list_empty(&block_group->io_list)) {
9807                 WARN_ON(1);
9808         }
9809         spin_unlock(&trans->transaction->dirty_bgs_lock);
9810         btrfs_remove_free_space_cache(block_group);
9811
9812         spin_lock(&block_group->space_info->lock);
9813         list_del_init(&block_group->ro_list);
9814
9815         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9816                 WARN_ON(block_group->space_info->total_bytes
9817                         < block_group->key.offset);
9818                 WARN_ON(block_group->space_info->bytes_readonly
9819                         < block_group->key.offset);
9820                 WARN_ON(block_group->space_info->disk_total
9821                         < block_group->key.offset * factor);
9822         }
9823         block_group->space_info->total_bytes -= block_group->key.offset;
9824         block_group->space_info->bytes_readonly -= block_group->key.offset;
9825         block_group->space_info->disk_total -= block_group->key.offset * factor;
9826
9827         spin_unlock(&block_group->space_info->lock);
9828
9829         memcpy(&key, &block_group->key, sizeof(key));
9830
9831         lock_chunks(root);
9832         if (!list_empty(&em->list)) {
9833                 /* We're in the transaction->pending_chunks list. */
9834                 free_extent_map(em);
9835         }
9836         spin_lock(&block_group->lock);
9837         block_group->removed = 1;
9838         /*
9839          * At this point trimming can't start on this block group, because we
9840          * removed the block group from the tree fs_info->block_group_cache_tree
9841          * so no one can't find it anymore and even if someone already got this
9842          * block group before we removed it from the rbtree, they have already
9843          * incremented block_group->trimming - if they didn't, they won't find
9844          * any free space entries because we already removed them all when we
9845          * called btrfs_remove_free_space_cache().
9846          *
9847          * And we must not remove the extent map from the fs_info->mapping_tree
9848          * to prevent the same logical address range and physical device space
9849          * ranges from being reused for a new block group. This is because our
9850          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9851          * completely transactionless, so while it is trimming a range the
9852          * currently running transaction might finish and a new one start,
9853          * allowing for new block groups to be created that can reuse the same
9854          * physical device locations unless we take this special care.
9855          */
9856         remove_em = (atomic_read(&block_group->trimming) == 0);
9857         /*
9858          * Make sure a trimmer task always sees the em in the pinned_chunks list
9859          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9860          * before checking block_group->removed).
9861          */
9862         if (!remove_em) {
9863                 /*
9864                  * Our em might be in trans->transaction->pending_chunks which
9865                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9866                  * and so is the fs_info->pinned_chunks list.
9867                  *
9868                  * So at this point we must be holding the chunk_mutex to avoid
9869                  * any races with chunk allocation (more specifically at
9870                  * volumes.c:contains_pending_extent()), to ensure it always
9871                  * sees the em, either in the pending_chunks list or in the
9872                  * pinned_chunks list.
9873                  */
9874                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9875         }
9876         spin_unlock(&block_group->lock);
9877
9878         if (remove_em) {
9879                 struct extent_map_tree *em_tree;
9880
9881                 em_tree = &root->fs_info->mapping_tree.map_tree;
9882                 write_lock(&em_tree->lock);
9883                 /*
9884                  * The em might be in the pending_chunks list, so make sure the
9885                  * chunk mutex is locked, since remove_extent_mapping() will
9886                  * delete us from that list.
9887                  */
9888                 remove_extent_mapping(em_tree, em);
9889                 write_unlock(&em_tree->lock);
9890                 /* once for the tree */
9891                 free_extent_map(em);
9892         }
9893
9894         unlock_chunks(root);
9895
9896         btrfs_put_block_group(block_group);
9897         btrfs_put_block_group(block_group);
9898
9899         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9900         if (ret > 0)
9901                 ret = -EIO;
9902         if (ret < 0)
9903                 goto out;
9904
9905         ret = btrfs_del_item(trans, root, path);
9906 out:
9907         btrfs_free_path(path);
9908         return ret;
9909 }
9910
9911 /*
9912  * Process the unused_bgs list and remove any that don't have any allocated
9913  * space inside of them.
9914  */
9915 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9916 {
9917         struct btrfs_block_group_cache *block_group;
9918         struct btrfs_space_info *space_info;
9919         struct btrfs_root *root = fs_info->extent_root;
9920         struct btrfs_trans_handle *trans;
9921         int ret = 0;
9922
9923         if (!fs_info->open)
9924                 return;
9925
9926         spin_lock(&fs_info->unused_bgs_lock);
9927         while (!list_empty(&fs_info->unused_bgs)) {
9928                 u64 start, end;
9929
9930                 block_group = list_first_entry(&fs_info->unused_bgs,
9931                                                struct btrfs_block_group_cache,
9932                                                bg_list);
9933                 space_info = block_group->space_info;
9934                 list_del_init(&block_group->bg_list);
9935                 if (ret || btrfs_mixed_space_info(space_info)) {
9936                         btrfs_put_block_group(block_group);
9937                         continue;
9938                 }
9939                 spin_unlock(&fs_info->unused_bgs_lock);
9940
9941                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
9942
9943                 /* Don't want to race with allocators so take the groups_sem */
9944                 down_write(&space_info->groups_sem);
9945                 spin_lock(&block_group->lock);
9946                 if (block_group->reserved ||
9947                     btrfs_block_group_used(&block_group->item) ||
9948                     block_group->ro) {
9949                         /*
9950                          * We want to bail if we made new allocations or have
9951                          * outstanding allocations in this block group.  We do
9952                          * the ro check in case balance is currently acting on
9953                          * this block group.
9954                          */
9955                         spin_unlock(&block_group->lock);
9956                         up_write(&space_info->groups_sem);
9957                         goto next;
9958                 }
9959                 spin_unlock(&block_group->lock);
9960
9961                 /* We don't want to force the issue, only flip if it's ok. */
9962                 ret = set_block_group_ro(block_group, 0);
9963                 up_write(&space_info->groups_sem);
9964                 if (ret < 0) {
9965                         ret = 0;
9966                         goto next;
9967                 }
9968
9969                 /*
9970                  * Want to do this before we do anything else so we can recover
9971                  * properly if we fail to join the transaction.
9972                  */
9973                 /* 1 for btrfs_orphan_reserve_metadata() */
9974                 trans = btrfs_start_transaction(root, 1);
9975                 if (IS_ERR(trans)) {
9976                         btrfs_set_block_group_rw(root, block_group);
9977                         ret = PTR_ERR(trans);
9978                         goto next;
9979                 }
9980
9981                 /*
9982                  * We could have pending pinned extents for this block group,
9983                  * just delete them, we don't care about them anymore.
9984                  */
9985                 start = block_group->key.objectid;
9986                 end = start + block_group->key.offset - 1;
9987                 /*
9988                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
9989                  * btrfs_finish_extent_commit(). If we are at transaction N,
9990                  * another task might be running finish_extent_commit() for the
9991                  * previous transaction N - 1, and have seen a range belonging
9992                  * to the block group in freed_extents[] before we were able to
9993                  * clear the whole block group range from freed_extents[]. This
9994                  * means that task can lookup for the block group after we
9995                  * unpinned it from freed_extents[] and removed it, leading to
9996                  * a BUG_ON() at btrfs_unpin_extent_range().
9997                  */
9998                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9999                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10000                                   EXTENT_DIRTY, GFP_NOFS);
10001                 if (ret) {
10002                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10003                         btrfs_set_block_group_rw(root, block_group);
10004                         goto end_trans;
10005                 }
10006                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10007                                   EXTENT_DIRTY, GFP_NOFS);
10008                 if (ret) {
10009                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10010                         btrfs_set_block_group_rw(root, block_group);
10011                         goto end_trans;
10012                 }
10013                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10014
10015                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10016                 spin_lock(&space_info->lock);
10017                 spin_lock(&block_group->lock);
10018
10019                 space_info->bytes_pinned -= block_group->pinned;
10020                 space_info->bytes_readonly += block_group->pinned;
10021                 percpu_counter_add(&space_info->total_bytes_pinned,
10022                                    -block_group->pinned);
10023                 block_group->pinned = 0;
10024
10025                 spin_unlock(&block_group->lock);
10026                 spin_unlock(&space_info->lock);
10027
10028                 /*
10029                  * Btrfs_remove_chunk will abort the transaction if things go
10030                  * horribly wrong.
10031                  */
10032                 ret = btrfs_remove_chunk(trans, root,
10033                                          block_group->key.objectid);
10034 end_trans:
10035                 btrfs_end_transaction(trans, root);
10036 next:
10037                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10038                 btrfs_put_block_group(block_group);
10039                 spin_lock(&fs_info->unused_bgs_lock);
10040         }
10041         spin_unlock(&fs_info->unused_bgs_lock);
10042 }
10043
10044 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10045 {
10046         struct btrfs_space_info *space_info;
10047         struct btrfs_super_block *disk_super;
10048         u64 features;
10049         u64 flags;
10050         int mixed = 0;
10051         int ret;
10052
10053         disk_super = fs_info->super_copy;
10054         if (!btrfs_super_root(disk_super))
10055                 return 1;
10056
10057         features = btrfs_super_incompat_flags(disk_super);
10058         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10059                 mixed = 1;
10060
10061         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10062         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10063         if (ret)
10064                 goto out;
10065
10066         if (mixed) {
10067                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10068                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10069         } else {
10070                 flags = BTRFS_BLOCK_GROUP_METADATA;
10071                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10072                 if (ret)
10073                         goto out;
10074
10075                 flags = BTRFS_BLOCK_GROUP_DATA;
10076                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10077         }
10078 out:
10079         return ret;
10080 }
10081
10082 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10083 {
10084         return unpin_extent_range(root, start, end, false);
10085 }
10086
10087 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10088 {
10089         struct btrfs_fs_info *fs_info = root->fs_info;
10090         struct btrfs_block_group_cache *cache = NULL;
10091         u64 group_trimmed;
10092         u64 start;
10093         u64 end;
10094         u64 trimmed = 0;
10095         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10096         int ret = 0;
10097
10098         /*
10099          * try to trim all FS space, our block group may start from non-zero.
10100          */
10101         if (range->len == total_bytes)
10102                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10103         else
10104                 cache = btrfs_lookup_block_group(fs_info, range->start);
10105
10106         while (cache) {
10107                 if (cache->key.objectid >= (range->start + range->len)) {
10108                         btrfs_put_block_group(cache);
10109                         break;
10110                 }
10111
10112                 start = max(range->start, cache->key.objectid);
10113                 end = min(range->start + range->len,
10114                                 cache->key.objectid + cache->key.offset);
10115
10116                 if (end - start >= range->minlen) {
10117                         if (!block_group_cache_done(cache)) {
10118                                 ret = cache_block_group(cache, 0);
10119                                 if (ret) {
10120                                         btrfs_put_block_group(cache);
10121                                         break;
10122                                 }
10123                                 ret = wait_block_group_cache_done(cache);
10124                                 if (ret) {
10125                                         btrfs_put_block_group(cache);
10126                                         break;
10127                                 }
10128                         }
10129                         ret = btrfs_trim_block_group(cache,
10130                                                      &group_trimmed,
10131                                                      start,
10132                                                      end,
10133                                                      range->minlen);
10134
10135                         trimmed += group_trimmed;
10136                         if (ret) {
10137                                 btrfs_put_block_group(cache);
10138                                 break;
10139                         }
10140                 }
10141
10142                 cache = next_block_group(fs_info->tree_root, cache);
10143         }
10144
10145         range->len = trimmed;
10146         return ret;
10147 }
10148
10149 /*
10150  * btrfs_{start,end}_write_no_snapshoting() are similar to
10151  * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
10152  * data into the page cache through nocow before the subvolume is snapshoted,
10153  * but flush the data into disk after the snapshot creation, or to prevent
10154  * operations while snapshoting is ongoing and that cause the snapshot to be
10155  * inconsistent (writes followed by expanding truncates for example).
10156  */
10157 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10158 {
10159         percpu_counter_dec(&root->subv_writers->counter);
10160         /*
10161          * Make sure counter is updated before we wake up
10162          * waiters.
10163          */
10164         smp_mb();
10165         if (waitqueue_active(&root->subv_writers->wait))
10166                 wake_up(&root->subv_writers->wait);
10167 }
10168
10169 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10170 {
10171         if (atomic_read(&root->will_be_snapshoted))
10172                 return 0;
10173
10174         percpu_counter_inc(&root->subv_writers->counter);
10175         /*
10176          * Make sure counter is updated before we check for snapshot creation.
10177          */
10178         smp_mb();
10179         if (atomic_read(&root->will_be_snapshoted)) {
10180                 btrfs_end_write_no_snapshoting(root);
10181                 return 0;
10182         }
10183         return 1;
10184 }