/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
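
/*
 * add_excluded_extent marks [start, start + num_bytes) as excluded from
 * free space caching by setting EXTENT_UPTODATE in both freed_extents
 * trees; free_excluded_extents clears those bits again once the block
 * group has been fully cached.
 */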
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}
static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}
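
/*
 * account for the superblock mirrors that live inside this block group:
 * their stripes are added to cache->bytes_super and excluded from the
 * free space cache so the allocator never hands them out.
 */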
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
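
/*
 * grab a reference on the caching control for this block group, or return
 * NULL if caching is not in progress (or took the fast path and therefore
 * never attached a caching_ctl).
 */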
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
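
/*
 * background work that walks the commit root of the extent tree for one
 * block group and turns the holes between allocated extents into free
 * space entries.
 */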
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched()) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->leafsize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
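
/*
 * start caching a block group: try the fast path (the on-disk free space
 * cache) first, and fall back to queueing caching_thread to rebuild the
 * free space cache from the extent tree.
 */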
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
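
/*
 * walk the space_info list and return the first space info whose flags
 * match the given allocation type (data, metadata or system).
 */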
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == start &&
		    key.type == BTRFS_METADATA_ITEM_KEY)
			ret = 0;
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->leafsize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (metadata) {
		key.objectid = bytenr;
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = offset;
	} else {
		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = offset;
	}

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		metadata = 0;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->leafsize)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = root->leafsize;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of tree block is recorded
 * in the back refs. Actually the full back refs is generic, and can be
 * used in all cases the implicit back refs is used. The major shortcoming
 * of the full back refs is its overhead. Every time a tree block gets
 * COWed, we have to update back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in old transaction, the
 * only way to drop a reference to it is COW it. So we can detect the
 * event that tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointers in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointers in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the parent block.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of key. The key offset for the implicit back refs is
 * objectid of block's owner tree. The key offset for the full back refs
 * is the first byte of parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
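
/*
 * the key offset for an EXTENT_DATA_REF item is a 63-bit hash of the
 * (root objectid, inode objectid, file offset) triple, built from two
 * crc32c sums so that different holders of an extent land in distinct
 * items.
 */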
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
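
/*
 * find the EXTENT_DATA_REF or SHARED_DATA_REF item for the given extent.
 * Hash collisions on the key offset are resolved by scanning forward and
 * matching the ref contents against (root, owner, offset).
 */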
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
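
/*
 * drop refs_to_drop references from the data ref item at the current path
 * position, deleting the item entirely once its count reaches zero.
 */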
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
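
/*
 * return the reference count stored in a data ref, either from an inline
 * ref (iref) or from the standalone item at the current path position.
 */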
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
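
/*
 * pick the back ref key type: tree blocks use TREE_BLOCK/SHARED_BLOCK
 * refs, file data uses EXTENT_DATA/SHARED_DATA refs; a nonzero parent
 * selects the shared (full back ref) variant.
 */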
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}
again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
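
/*
 * look for an inline back ref first, and fall back to searching for a
 * separate back ref item if no inline ref is found.
 */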
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
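
/*
 * insert a new inline back ref, or bump the count of an existing one.
 * The -EAGAIN from lookup (extent item full) is passed through to the
 * caller, which then falls back to a separate back ref item.
 */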
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
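
/*
 * discard helpers: map the logical range to its physical stripes and
 * issue a discard to each device that supports it, summing up how many
 * bytes were actually discarded.
 */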
static int btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret != -EAGAIN)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}
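
/*
 * apply one delayed ref to a data extent: insert the extent item for a
 * newly reserved allocation, or add/drop one back reference.
 */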
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
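
/*
 * write out the flag/key updates queued in a delayed extent op to the
 * extent item itself.
 */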
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			btrfs_release_path(path);
			metadata = 0;

			key.offset = node->num_bytes;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			goto again;
		}

		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
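
/*
 * apply one delayed ref to a tree block, mirroring run_delayed_data_ref
 * for metadata (skinny metadata keys store the level in the key offset).
 */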
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(node, head, node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed ref.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
2307 * Returns 0 on success or if called with an already aborted transaction.
2308 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2310 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2311 struct btrfs_root *root,
2312 struct list_head *cluster)
2314 struct btrfs_delayed_ref_root *delayed_refs;
2315 struct btrfs_delayed_ref_node *ref;
2316 struct btrfs_delayed_ref_head *locked_ref = NULL;
2317 struct btrfs_delayed_extent_op *extent_op;
2318 struct btrfs_fs_info *fs_info = root->fs_info;
2321 int must_insert_reserved = 0;
2323 delayed_refs = &trans->transaction->delayed_refs;
2326 /* pick a new head ref from the cluster list */
2327 if (list_empty(cluster))
2330 locked_ref = list_entry(cluster->next,
2331 struct btrfs_delayed_ref_head, cluster);
2333 /* grab the lock that says we are going to process
2334 * all the refs for this head */
2335 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2338 * we may have dropped the spin lock to get the head
2339 * mutex lock, and that might have given someone else
2340 * time to free the head. If that's true, it has been
2341 * removed from our list and we can move on.
2343 if (ret == -EAGAIN) {
2351 * We need to try and merge add/drops of the same ref since we
2352 * can run into issues with relocate dropping the implicit ref
2353 * and then it being added back again before the drop can
2354 * finish. If we merged anything we need to re-loop so we can
2357 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2361 * locked_ref is the head node, so we have to go one
2362 * node back for any delayed ref updates
2364 ref = select_delayed_ref(locked_ref);
2366 if (ref && ref->seq &&
2367 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2369 * there are still refs with lower seq numbers in the
2370 * process of being added. Don't run this ref yet.
2372 list_del_init(&locked_ref->cluster);
2373 btrfs_delayed_ref_unlock(locked_ref);
2375 delayed_refs->num_heads_ready++;
2376 spin_unlock(&delayed_refs->lock);
2378 spin_lock(&delayed_refs->lock);
2383 * record the must insert reserved flag before we
2384 * drop the spin lock.
2386 must_insert_reserved = locked_ref->must_insert_reserved;
2387 locked_ref->must_insert_reserved = 0;
2389 extent_op = locked_ref->extent_op;
2390 locked_ref->extent_op = NULL;
2393 /* All delayed refs have been processed, go ahead
2394 * and send the head node to run_one_delayed_ref,
2395 * so that any accounting fixes can happen
2397 ref = &locked_ref->node;
2399 if (extent_op && must_insert_reserved) {
2400 btrfs_free_delayed_extent_op(extent_op);
2405 spin_unlock(&delayed_refs->lock);
2407 ret = run_delayed_extent_op(trans, root,
2409 btrfs_free_delayed_extent_op(extent_op);
2413 * Need to reset must_insert_reserved if
2414 * there was an error so the abort stuff
2415 * can clean up the reserved space
2418 if (must_insert_reserved)
2419 locked_ref->must_insert_reserved = 1;
2420 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2421 spin_lock(&delayed_refs->lock);
2422 btrfs_delayed_ref_unlock(locked_ref);
2431 rb_erase(&ref->rb_node, &delayed_refs->root);
2432 delayed_refs->num_entries--;
2433 if (!btrfs_delayed_ref_is_head(ref)) {
2435 * when we play the delayed ref, also correct the
2438 switch (ref->action) {
2439 case BTRFS_ADD_DELAYED_REF:
2440 case BTRFS_ADD_DELAYED_EXTENT:
2441 locked_ref->node.ref_mod -= ref->ref_mod;
2443 case BTRFS_DROP_DELAYED_REF:
2444 locked_ref->node.ref_mod += ref->ref_mod;
2450 list_del_init(&locked_ref->cluster);
2452 spin_unlock(&delayed_refs->lock);
2454 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2455 must_insert_reserved);
2457 btrfs_free_delayed_extent_op(extent_op);
2459 btrfs_delayed_ref_unlock(locked_ref);
2460 btrfs_put_delayed_ref(ref);
2461 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2462 spin_lock(&delayed_refs->lock);
2467 * If this node is a head, that means all the refs in this head
2468 * have been dealt with, and we will pick the next head to deal
2469 * with, so we must unlock the head and drop it from the cluster
2470 * list before we release it.
2472 if (btrfs_delayed_ref_is_head(ref)) {
2473 btrfs_delayed_ref_unlock(locked_ref);
2476 btrfs_put_delayed_ref(ref);
2480 spin_lock(&delayed_refs->lock);
2485 #ifdef SCRAMBLE_DELAYED_REFS
2487 * Normally delayed refs get processed in ascending bytenr order. This
2488 * correlates in most cases to the order added. To expose dependencies on this
2489 * order, we start to process the tree in the middle instead of the beginning
2491 static u64 find_middle(struct rb_root *root)
2493 struct rb_node *n = root->rb_node;
2494 struct btrfs_delayed_ref_node *entry;
2497 u64 first = 0, last = 0;
2501 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2502 first = entry->bytenr;
2506 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2507 last = entry->bytenr;
2512 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2513 WARN_ON(!entry->in_tree);
2515 middle = entry->bytenr;
2528 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2529 struct btrfs_fs_info *fs_info)
2531 struct qgroup_update *qgroup_update;
2534 if (list_empty(&trans->qgroup_ref_list) !=
2535 !trans->delayed_ref_elem.seq) {
2536 /* list without seq or seq without list */
2538 "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2539 list_empty(&trans->qgroup_ref_list) ? "" : " not",
2540 (u32)(trans->delayed_ref_elem.seq >> 32),
2541 (u32)trans->delayed_ref_elem.seq);
2545 if (!trans->delayed_ref_elem.seq)
2548 while (!list_empty(&trans->qgroup_ref_list)) {
2549 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2550 struct qgroup_update, list);
2551 list_del(&qgroup_update->list);
2553 ret = btrfs_qgroup_account_ref(
2554 trans, fs_info, qgroup_update->node,
2555 qgroup_update->extent_op);
2556 kfree(qgroup_update);
2559 btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2564 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2567 int val = atomic_read(&delayed_refs->ref_seq);
2569 if (val < seq || val >= seq + count)
2574 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2578 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2579 sizeof(struct btrfs_extent_inline_ref));
2580 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2581 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2584 * We don't ever fill up leaves all the way so multiply by 2 just to be
2585 * closer to what we're really going to want to use.
2587 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
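
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the same leaf estimate as above written as a standalone helper, so the
 * arithmetic is easy to follow. leaf_size and skinny_metadata are
 * hypothetical stand-ins for what heads_to_leaves() reads from @root.
 */
static inline u64 example_heads_to_leaves(u64 heads, u32 leaf_size,
					  bool skinny_metadata)
{
	/* worst case: each head becomes one extent item plus an inline ref */
	u64 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
				 sizeof(struct btrfs_extent_inline_ref));

	/* non-skinny filesystems also store a tree_block_info per item */
	if (!skinny_metadata)
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	return div64_u64(num_bytes, leaf_size);
}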
2590 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2591 struct btrfs_root *root)
2593 struct btrfs_block_rsv *global_rsv;
2594 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2598 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2599 num_heads = heads_to_leaves(root, num_heads);
2601 num_bytes += (num_heads - 1) * root->leafsize;
2603 global_rsv = &root->fs_info->global_block_rsv;
2606 * If we can't allocate any more chunks let's make sure we have _lots_ of
2607 * wiggle room since running delayed refs can create more delayed refs.
2609 if (global_rsv->space_info->full)
2612 spin_lock(&global_rsv->lock);
2613 if (global_rsv->reserved <= num_bytes)
2615 spin_unlock(&global_rsv->lock);
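
/*
 * Illustrative caller pattern (added commentary; the example_* helper is
 * hypothetical): a long-running operation can poll the helper above and
 * voluntarily run a bounded batch of delayed refs when the global
 * reserve is getting tight.
 */
static inline int example_maybe_throttle(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (btrfs_should_throttle_delayed_refs(trans, root))
		/* run a modest batch instead of chasing every new ref */
		return btrfs_run_delayed_refs(trans, root, 64);
	return 0;
}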
2620 * this starts processing the delayed reference count updates and
2621 * extent insertions we have queued up so far. count can be
2622 * 0, which means to process everything in the tree at the start
2623 * of the run (but not newly added entries), or it can be some target
2624 * number you'd like to process.
2626 * Returns 0 on success or if called with an aborted transaction
2627 * Returns <0 on error and aborts the transaction
2629 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2630 struct btrfs_root *root, unsigned long count)
2632 struct rb_node *node;
2633 struct btrfs_delayed_ref_root *delayed_refs;
2634 struct btrfs_delayed_ref_node *ref;
2635 struct list_head cluster;
2638 int run_all = count == (unsigned long)-1;
2642 /* We'll clean this up in btrfs_cleanup_transaction */
2646 if (root == root->fs_info->extent_root)
2647 root = root->fs_info->tree_root;
2649 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2651 delayed_refs = &trans->transaction->delayed_refs;
2652 INIT_LIST_HEAD(&cluster);
2654 count = delayed_refs->num_entries * 2;
2658 if (!run_all && !run_most) {
2660 int seq = atomic_read(&delayed_refs->ref_seq);
2663 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2665 DEFINE_WAIT(__wait);
2666 if (delayed_refs->flushing ||
2667 !btrfs_should_throttle_delayed_refs(trans, root))
2670 prepare_to_wait(&delayed_refs->wait, &__wait,
2671 TASK_UNINTERRUPTIBLE);
2673 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2676 finish_wait(&delayed_refs->wait, &__wait);
2678 if (!refs_newer(delayed_refs, seq, 256))
2683 finish_wait(&delayed_refs->wait, &__wait);
2689 atomic_inc(&delayed_refs->procs_running_refs);
2694 spin_lock(&delayed_refs->lock);
2696 #ifdef SCRAMBLE_DELAYED_REFS
2697 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2701 if (!(run_all || run_most) &&
2702 !btrfs_should_throttle_delayed_refs(trans, root))
2706 * go find something we can process in the rbtree. We start at
2707 * the beginning of the tree, and then build a cluster
2708 * of refs to process starting at the first one we are able to
2711 delayed_start = delayed_refs->run_delayed_start;
2712 ret = btrfs_find_ref_cluster(trans, &cluster,
2713 delayed_refs->run_delayed_start);
2717 ret = run_clustered_refs(trans, root, &cluster);
2719 btrfs_release_ref_cluster(&cluster);
2720 spin_unlock(&delayed_refs->lock);
2721 btrfs_abort_transaction(trans, root, ret);
2722 atomic_dec(&delayed_refs->procs_running_refs);
2723 wake_up(&delayed_refs->wait);
2727 atomic_add(ret, &delayed_refs->ref_seq);
2729 count -= min_t(unsigned long, ret, count);
2734 if (delayed_start >= delayed_refs->run_delayed_start) {
2737 * btrfs_find_ref_cluster looped. Let's do one
2738 * more cycle. If we don't run any delayed refs
2739 * during that cycle (because all of them are
2740 * blocked), bail out.
2745 * no runnable refs left, stop trying
2752 /* refs were run, let's reset staleness detection */
2758 if (!list_empty(&trans->new_bgs)) {
2759 spin_unlock(&delayed_refs->lock);
2760 btrfs_create_pending_block_groups(trans, root);
2761 spin_lock(&delayed_refs->lock);
2764 node = rb_first(&delayed_refs->root);
2767 count = (unsigned long)-1;
2770 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2772 if (btrfs_delayed_ref_is_head(ref)) {
2773 struct btrfs_delayed_ref_head *head;
2775 head = btrfs_delayed_node_to_head(ref);
2776 atomic_inc(&ref->refs);
2778 spin_unlock(&delayed_refs->lock);
2780 * Mutex was contended, block until it's
2781 * released and try again
2783 mutex_lock(&head->mutex);
2784 mutex_unlock(&head->mutex);
2786 btrfs_put_delayed_ref(ref);
2790 node = rb_next(node);
2792 spin_unlock(&delayed_refs->lock);
2793 schedule_timeout(1);
2797 atomic_dec(&delayed_refs->procs_running_refs);
2799 if (waitqueue_active(&delayed_refs->wait))
2800 wake_up(&delayed_refs->wait);
2802 spin_unlock(&delayed_refs->lock);
2803 assert_qgroups_uptodate(trans);
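
/*
 * Usage sketch for the count argument (added commentary; hypothetical
 * helper): count == 0 processes what was queued when the run started,
 * while (unsigned long)-1 also chases refs queued during the run.
 */
static inline int example_flush_all_delayed_refs(struct btrfs_trans_handle *trans,
						 struct btrfs_root *root)
{
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		btrfs_debug(root->fs_info,
			    "btrfs_run_delayed_refs returned %d", ret);
	return ret;
}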
2807 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2808 struct btrfs_root *root,
2809 u64 bytenr, u64 num_bytes, u64 flags,
2810 int level, int is_data)
2812 struct btrfs_delayed_extent_op *extent_op;
2815 extent_op = btrfs_alloc_delayed_extent_op();
2819 extent_op->flags_to_set = flags;
2820 extent_op->update_flags = 1;
2821 extent_op->update_key = 0;
2822 extent_op->is_data = is_data ? 1 : 0;
2823 extent_op->level = level;
2825 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2826 num_bytes, extent_op);
2828 btrfs_free_delayed_extent_op(extent_op);
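
/*
 * Illustrative call (added commentary; the helper is hypothetical): queue
 * a delayed extent op that sets FULL_BACKREF on a tree block's extent
 * item, deriving bytenr/size/level from the extent buffer rather than
 * passing raw numbers.
 */
static inline int example_set_full_backref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct extent_buffer *buf)
{
	return btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
					   BTRFS_BLOCK_FLAG_FULL_BACKREF,
					   btrfs_header_level(buf), 0);
}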
2832 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2833 struct btrfs_root *root,
2834 struct btrfs_path *path,
2835 u64 objectid, u64 offset, u64 bytenr)
2837 struct btrfs_delayed_ref_head *head;
2838 struct btrfs_delayed_ref_node *ref;
2839 struct btrfs_delayed_data_ref *data_ref;
2840 struct btrfs_delayed_ref_root *delayed_refs;
2841 struct rb_node *node;
2845 delayed_refs = &trans->transaction->delayed_refs;
2846 spin_lock(&delayed_refs->lock);
2847 head = btrfs_find_delayed_ref_head(trans, bytenr);
2851 if (!mutex_trylock(&head->mutex)) {
2852 atomic_inc(&head->node.refs);
2853 spin_unlock(&delayed_refs->lock);
2855 btrfs_release_path(path);
2858 * Mutex was contended, block until it's released and let
2861 mutex_lock(&head->mutex);
2862 mutex_unlock(&head->mutex);
2863 btrfs_put_delayed_ref(&head->node);
2867 node = rb_prev(&head->node.rb_node);
2871 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2873 if (ref->bytenr != bytenr)
2877 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2880 data_ref = btrfs_delayed_node_to_data_ref(ref);
2882 node = rb_prev(node);
2886 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2887 if (ref->bytenr == bytenr && ref->seq == seq)
2891 if (data_ref->root != root->root_key.objectid ||
2892 data_ref->objectid != objectid || data_ref->offset != offset)
2897 mutex_unlock(&head->mutex);
2899 spin_unlock(&delayed_refs->lock);
2903 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2904 struct btrfs_root *root,
2905 struct btrfs_path *path,
2906 u64 objectid, u64 offset, u64 bytenr)
2908 struct btrfs_root *extent_root = root->fs_info->extent_root;
2909 struct extent_buffer *leaf;
2910 struct btrfs_extent_data_ref *ref;
2911 struct btrfs_extent_inline_ref *iref;
2912 struct btrfs_extent_item *ei;
2913 struct btrfs_key key;
2917 key.objectid = bytenr;
2918 key.offset = (u64)-1;
2919 key.type = BTRFS_EXTENT_ITEM_KEY;
2921 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2924 BUG_ON(ret == 0); /* Corruption */
2927 if (path->slots[0] == 0)
2931 leaf = path->nodes[0];
2932 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2934 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2938 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2939 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2940 if (item_size < sizeof(*ei)) {
2941 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2945 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2947 if (item_size != sizeof(*ei) +
2948 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2951 if (btrfs_extent_generation(leaf, ei) <=
2952 btrfs_root_last_snapshot(&root->root_item))
2955 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2956 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2957 BTRFS_EXTENT_DATA_REF_KEY)
2960 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2961 if (btrfs_extent_refs(leaf, ei) !=
2962 btrfs_extent_data_ref_count(leaf, ref) ||
2963 btrfs_extent_data_ref_root(leaf, ref) !=
2964 root->root_key.objectid ||
2965 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2966 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2974 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2975 struct btrfs_root *root,
2976 u64 objectid, u64 offset, u64 bytenr)
2978 struct btrfs_path *path;
2982 path = btrfs_alloc_path();
2987 ret = check_committed_ref(trans, root, path, objectid,
2989 if (ret && ret != -ENOENT)
2992 ret2 = check_delayed_ref(trans, root, path, objectid,
2994 } while (ret2 == -EAGAIN);
2996 if (ret2 && ret2 != -ENOENT) {
3001 if (ret != -ENOENT || ret2 != -ENOENT)
3004 btrfs_free_path(path);
3005 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
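
/*
 * Illustrative check (added commentary; the example_* helper is
 * hypothetical): a nocow-style caller asks whether anyone else references
 * the extent before rewriting it in place. A zero return from
 * btrfs_cross_ref_exist() means no committed or delayed cross reference
 * was found; errors are conservatively treated as if one existed.
 */
static inline bool example_can_rewrite_in_place(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						u64 ino, u64 file_offset,
						u64 disk_bytenr)
{
	return btrfs_cross_ref_exist(trans, root, ino, file_offset,
				     disk_bytenr) == 0;
}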
3010 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3011 struct btrfs_root *root,
3012 struct extent_buffer *buf,
3013 int full_backref, int inc, int for_cow)
3020 struct btrfs_key key;
3021 struct btrfs_file_extent_item *fi;
3025 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3026 u64, u64, u64, u64, u64, u64, int);
3028 ref_root = btrfs_header_owner(buf);
3029 nritems = btrfs_header_nritems(buf);
3030 level = btrfs_header_level(buf);
3032 if (!root->ref_cows && level == 0)
3036 process_func = btrfs_inc_extent_ref;
3038 process_func = btrfs_free_extent;
3041 parent = buf->start;
3045 for (i = 0; i < nritems; i++) {
3047 btrfs_item_key_to_cpu(buf, &key, i);
3048 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3050 fi = btrfs_item_ptr(buf, i,
3051 struct btrfs_file_extent_item);
3052 if (btrfs_file_extent_type(buf, fi) ==
3053 BTRFS_FILE_EXTENT_INLINE)
3055 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3059 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3060 key.offset -= btrfs_file_extent_offset(buf, fi);
3061 ret = process_func(trans, root, bytenr, num_bytes,
3062 parent, ref_root, key.objectid,
3063 key.offset, for_cow);
3067 bytenr = btrfs_node_blockptr(buf, i);
3068 num_bytes = btrfs_level_size(root, level - 1);
3069 ret = process_func(trans, root, bytenr, num_bytes,
3070 parent, ref_root, level - 1, 0,
3081 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3082 struct extent_buffer *buf, int full_backref, int for_cow)
3084 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3087 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3088 struct extent_buffer *buf, int full_backref, int for_cow)
3090 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
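
/*
 * Sketch of the COW bookkeeping these wrappers enable (added commentary;
 * hypothetical helper, error handling trimmed): when a tree block is
 * copied, references are added for the new copy before being dropped
 * from the old one, so the extents never look unreferenced.
 */
static inline int example_move_refs_on_cow(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct extent_buffer *old_buf,
					   struct extent_buffer *new_buf)
{
	int ret;

	ret = btrfs_inc_ref(trans, root, new_buf, 0, 1);
	if (ret)
		return ret;
	return btrfs_dec_ref(trans, root, old_buf, 0, 1);
}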
3093 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3094 struct btrfs_root *root,
3095 struct btrfs_path *path,
3096 struct btrfs_block_group_cache *cache)
3099 struct btrfs_root *extent_root = root->fs_info->extent_root;
3101 struct extent_buffer *leaf;
3103 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3106 BUG_ON(ret); /* Corruption */
3108 leaf = path->nodes[0];
3109 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3110 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3111 btrfs_mark_buffer_dirty(leaf);
3112 btrfs_release_path(path);
3115 btrfs_abort_transaction(trans, root, ret);
3122 static struct btrfs_block_group_cache *
3123 next_block_group(struct btrfs_root *root,
3124 struct btrfs_block_group_cache *cache)
3126 struct rb_node *node;
3127 spin_lock(&root->fs_info->block_group_cache_lock);
3128 node = rb_next(&cache->cache_node);
3129 btrfs_put_block_group(cache);
3131 cache = rb_entry(node, struct btrfs_block_group_cache,
3133 btrfs_get_block_group(cache);
3136 spin_unlock(&root->fs_info->block_group_cache_lock);
3140 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3141 struct btrfs_trans_handle *trans,
3142 struct btrfs_path *path)
3144 struct btrfs_root *root = block_group->fs_info->tree_root;
3145 struct inode *inode = NULL;
3147 int dcs = BTRFS_DC_ERROR;
3153 * If this block group is smaller than 100 megs don't bother caching the
3156 if (block_group->key.offset < (100 * 1024 * 1024)) {
3157 spin_lock(&block_group->lock);
3158 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3159 spin_unlock(&block_group->lock);
3164 inode = lookup_free_space_inode(root, block_group, path);
3165 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3166 ret = PTR_ERR(inode);
3167 btrfs_release_path(path);
3171 if (IS_ERR(inode)) {
3175 if (block_group->ro)
3178 ret = create_free_space_inode(root, trans, block_group, path);
3184 /* We've already setup this transaction, go ahead and exit */
3185 if (block_group->cache_generation == trans->transid &&
3186 i_size_read(inode)) {
3187 dcs = BTRFS_DC_SETUP;
3192 * We want to set the generation to 0 so that if anything goes wrong
3193 * from here on out we know not to trust this cache when we load up next
3196 BTRFS_I(inode)->generation = 0;
3197 ret = btrfs_update_inode(trans, root, inode);
3200 if (i_size_read(inode) > 0) {
3201 ret = btrfs_check_trunc_cache_free_space(root,
3202 &root->fs_info->global_block_rsv);
3206 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3211 spin_lock(&block_group->lock);
3212 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3213 !btrfs_test_opt(root, SPACE_CACHE)) {
3215 * don't bother trying to write stuff out _if_
3216 * a) we're not cached,
3217 * b) we're with nospace_cache mount option.
3219 dcs = BTRFS_DC_WRITTEN;
3220 spin_unlock(&block_group->lock);
3223 spin_unlock(&block_group->lock);
3226 * Try to preallocate enough space based on how big the block group is.
3227 * Keep in mind this has to include any pinned space which could end up
3228 * taking up quite a bit since it's not folded into the other space
3231 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3236 num_pages *= PAGE_CACHE_SIZE;
3238 ret = btrfs_check_data_free_space(inode, num_pages);
3242 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3243 num_pages, num_pages,
3246 dcs = BTRFS_DC_SETUP;
3247 btrfs_free_reserved_data_space(inode, num_pages);
3252 btrfs_release_path(path);
3254 spin_lock(&block_group->lock);
3255 if (!ret && dcs == BTRFS_DC_SETUP)
3256 block_group->cache_generation = trans->transid;
3257 block_group->disk_cache_state = dcs;
3258 spin_unlock(&block_group->lock);
3263 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3264 struct btrfs_root *root)
3266 struct btrfs_block_group_cache *cache;
3268 struct btrfs_path *path;
3271 path = btrfs_alloc_path();
3277 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3279 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3281 cache = next_block_group(root, cache);
3289 err = cache_save_setup(cache, trans, path);
3290 last = cache->key.objectid + cache->key.offset;
3291 btrfs_put_block_group(cache);
3296 err = btrfs_run_delayed_refs(trans, root,
3298 if (err) /* File system offline */
3302 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3304 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3305 btrfs_put_block_group(cache);
3311 cache = next_block_group(root, cache);
3320 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3321 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3323 last = cache->key.objectid + cache->key.offset;
3325 err = write_one_cache_group(trans, root, path, cache);
3326 btrfs_put_block_group(cache);
3327 if (err) /* File system offline */
3333 * I don't think this is needed since we're just marking our
3334 * preallocated extent as written, but just in case it can't
3338 err = btrfs_run_delayed_refs(trans, root,
3340 if (err) /* File system offline */
3344 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3347 * Really this shouldn't happen, but it could if we
3348 * couldn't write the entire preallocated extent and
3349 * splitting the extent resulted in a new block.
3352 btrfs_put_block_group(cache);
3355 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3357 cache = next_block_group(root, cache);
3366 err = btrfs_write_out_cache(root, trans, cache, path);
3369 * If we didn't have an error then the cache state is still
3370 * NEED_WRITE, so we can set it to WRITTEN.
3372 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3373 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3374 last = cache->key.objectid + cache->key.offset;
3375 btrfs_put_block_group(cache);
3379 btrfs_free_path(path);
3383 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3385 struct btrfs_block_group_cache *block_group;
3388 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3389 if (!block_group || block_group->ro)
3392 btrfs_put_block_group(block_group);
3396 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3397 u64 total_bytes, u64 bytes_used,
3398 struct btrfs_space_info **space_info)
3400 struct btrfs_space_info *found;
3405 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3406 BTRFS_BLOCK_GROUP_RAID10))
3411 found = __find_space_info(info, flags);
3413 spin_lock(&found->lock);
3414 found->total_bytes += total_bytes;
3415 found->disk_total += total_bytes * factor;
3416 found->bytes_used += bytes_used;
3417 found->disk_used += bytes_used * factor;
3419 spin_unlock(&found->lock);
3420 *space_info = found;
3423 found = kzalloc(sizeof(*found), GFP_NOFS);
3427 ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3433 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3434 INIT_LIST_HEAD(&found->block_groups[i]);
3435 init_rwsem(&found->groups_sem);
3436 spin_lock_init(&found->lock);
3437 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3438 found->total_bytes = total_bytes;
3439 found->disk_total = total_bytes * factor;
3440 found->bytes_used = bytes_used;
3441 found->disk_used = bytes_used * factor;
3442 found->bytes_pinned = 0;
3443 found->bytes_reserved = 0;
3444 found->bytes_readonly = 0;
3445 found->bytes_may_use = 0;
3447 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3448 found->chunk_alloc = 0;
3450 init_waitqueue_head(&found->wait);
3451 *space_info = found;
3452 list_add_rcu(&found->list, &info->space_info);
3453 if (flags & BTRFS_BLOCK_GROUP_DATA)
3454 info->data_sinfo = found;
3458 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3460 u64 extra_flags = chunk_to_extended(flags) &
3461 BTRFS_EXTENDED_PROFILE_MASK;
3463 write_seqlock(&fs_info->profiles_lock);
3464 if (flags & BTRFS_BLOCK_GROUP_DATA)
3465 fs_info->avail_data_alloc_bits |= extra_flags;
3466 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3467 fs_info->avail_metadata_alloc_bits |= extra_flags;
3468 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3469 fs_info->avail_system_alloc_bits |= extra_flags;
3470 write_sequnlock(&fs_info->profiles_lock);
3474 * returns target flags in extended format or 0 if restripe for this
3475 * chunk_type is not in progress
3477 * should be called with either volume_mutex or balance_lock held
3479 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3481 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3487 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3488 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3489 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3490 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3491 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3492 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3493 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3494 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3495 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3502 * @flags: available profiles in extended format (see ctree.h)
3504 * Returns reduced profile in chunk format. If profile changing is in
3505 * progress (either running or paused) picks the target profile (if it's
3506 * already available), otherwise falls back to plain reducing.
3508 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3511 * we add in the count of missing devices because we want
3512 * to make sure that any RAID levels on a degraded FS
3513 * continue to be honored.
3515 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3516 root->fs_info->fs_devices->missing_devices;
3521 * see if restripe for this chunk_type is in progress, if so
3522 * try to reduce to the target profile
3524 spin_lock(&root->fs_info->balance_lock);
3525 target = get_restripe_target(root->fs_info, flags);
3527 /* pick target profile only if it's already available */
3528 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3529 spin_unlock(&root->fs_info->balance_lock);
3530 return extended_to_chunk(target);
3533 spin_unlock(&root->fs_info->balance_lock);
3535 /* First, mask out the RAID levels which aren't possible */
3536 if (num_devices == 1)
3537 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3538 BTRFS_BLOCK_GROUP_RAID5);
3539 if (num_devices < 3)
3540 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3541 if (num_devices < 4)
3542 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3544 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3545 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3546 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3549 if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3550 tmp = BTRFS_BLOCK_GROUP_RAID6;
3551 else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3552 tmp = BTRFS_BLOCK_GROUP_RAID5;
3553 else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3554 tmp = BTRFS_BLOCK_GROUP_RAID10;
3555 else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3556 tmp = BTRFS_BLOCK_GROUP_RAID1;
3557 else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3558 tmp = BTRFS_BLOCK_GROUP_RAID0;
3560 return extended_to_chunk(flags | tmp);
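
/*
 * Worked example for the reduction above (added commentary): on a
 * two-device filesystem with no restripe target, RAID6 (needs >= 3
 * devices) and RAID10 (needs >= 4) are masked off first. If the
 * remaining candidates are RAID1 | RAID0, the priority chain picks
 * RAID1, so that is what gets handed to extended_to_chunk().
 */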
3563 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3568 seq = read_seqbegin(&root->fs_info->profiles_lock);
3570 if (flags & BTRFS_BLOCK_GROUP_DATA)
3571 flags |= root->fs_info->avail_data_alloc_bits;
3572 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3573 flags |= root->fs_info->avail_system_alloc_bits;
3574 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3575 flags |= root->fs_info->avail_metadata_alloc_bits;
3576 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3578 return btrfs_reduce_alloc_profile(root, flags);
3581 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3587 flags = BTRFS_BLOCK_GROUP_DATA;
3588 else if (root == root->fs_info->chunk_root)
3589 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3591 flags = BTRFS_BLOCK_GROUP_METADATA;
3593 ret = get_alloc_profile(root, flags);
3598 * This will check the space info that the inode allocates from to make sure
3599 * we have enough space for the requested bytes.
3601 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3603 struct btrfs_space_info *data_sinfo;
3604 struct btrfs_root *root = BTRFS_I(inode)->root;
3605 struct btrfs_fs_info *fs_info = root->fs_info;
3607 int ret = 0, committed = 0, alloc_chunk = 1;
3609 /* make sure bytes are sectorsize aligned */
3610 bytes = ALIGN(bytes, root->sectorsize);
3612 if (root == root->fs_info->tree_root ||
3613 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3618 data_sinfo = fs_info->data_sinfo;
3623 /* make sure we have enough space to handle the data first */
3624 spin_lock(&data_sinfo->lock);
3625 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3626 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3627 data_sinfo->bytes_may_use;
3629 if (used + bytes > data_sinfo->total_bytes) {
3630 struct btrfs_trans_handle *trans;
3633 * if we don't have enough free bytes in this space then we need
3634 * to alloc a new chunk.
3636 if (!data_sinfo->full && alloc_chunk) {
3639 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3640 spin_unlock(&data_sinfo->lock);
3642 alloc_target = btrfs_get_alloc_profile(root, 1);
3643 trans = btrfs_join_transaction(root);
3645 return PTR_ERR(trans);
3647 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3649 CHUNK_ALLOC_NO_FORCE);
3650 btrfs_end_transaction(trans, root);
3659 data_sinfo = fs_info->data_sinfo;
3665 * If we don't have enough pinned space to deal with this
3666 * allocation don't bother committing the transaction.
3668 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3671 spin_unlock(&data_sinfo->lock);
3673 /* commit the current transaction and try again */
3676 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3679 trans = btrfs_join_transaction(root);
3681 return PTR_ERR(trans);
3682 ret = btrfs_commit_transaction(trans, root);
3690 data_sinfo->bytes_may_use += bytes;
3691 trace_btrfs_space_reservation(root->fs_info, "space_info",
3692 data_sinfo->flags, bytes, 1);
3693 spin_unlock(&data_sinfo->lock);
3699 * Called if we need to clear a data reservation for this inode.
3701 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3703 struct btrfs_root *root = BTRFS_I(inode)->root;
3704 struct btrfs_space_info *data_sinfo;
3706 /* make sure bytes are sectorsize aligned */
3707 bytes = ALIGN(bytes, root->sectorsize);
3709 data_sinfo = root->fs_info->data_sinfo;
3710 spin_lock(&data_sinfo->lock);
3711 WARN_ON(data_sinfo->bytes_may_use < bytes);
3712 data_sinfo->bytes_may_use -= bytes;
3713 trace_btrfs_space_reservation(root->fs_info, "space_info",
3714 data_sinfo->flags, bytes, 0);
3715 spin_unlock(&data_sinfo->lock);
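
/*
 * Illustrative reserve/undo pairing (added commentary; the helper,
 * write_len and do_write callback are hypothetical): callers reserve data
 * space before dirtying pages and must release it themselves on any
 * failure path, since nothing has actually been allocated yet.
 */
static inline int example_reserve_for_write(struct inode *inode, u64 write_len,
					    int (*do_write)(struct inode *))
{
	int ret;

	ret = btrfs_check_data_free_space(inode, write_len);
	if (ret)
		return ret;	/* typically -ENOSPC, nothing reserved */

	ret = do_write(inode);
	if (ret)		/* undo the bytes_may_use accounting */
		btrfs_free_reserved_data_space(inode, write_len);
	return ret;
}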
3718 static void force_metadata_allocation(struct btrfs_fs_info *info)
3720 struct list_head *head = &info->space_info;
3721 struct btrfs_space_info *found;
3724 list_for_each_entry_rcu(found, head, list) {
3725 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3726 found->force_alloc = CHUNK_ALLOC_FORCE;
3731 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3733 return (global->size << 1);
3736 static int should_alloc_chunk(struct btrfs_root *root,
3737 struct btrfs_space_info *sinfo, int force)
3739 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3740 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3741 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3744 if (force == CHUNK_ALLOC_FORCE)
3748 * We need to take into account the global rsv because for all intents
3749 * and purposes it's used space. Don't worry about locking the
3750 * global_rsv, it doesn't change except when the transaction commits.
3752 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3753 num_allocated += calc_global_rsv_need_space(global_rsv);
3756 * in limited mode, we want to have some free space up to
3757 * about 1% of the FS size.
3759 if (force == CHUNK_ALLOC_LIMITED) {
3760 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3761 thresh = max_t(u64, 64 * 1024 * 1024,
3762 div_factor_fine(thresh, 1));
3764 if (num_bytes - num_allocated < thresh)
3768 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3773 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3777 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3778 BTRFS_BLOCK_GROUP_RAID0 |
3779 BTRFS_BLOCK_GROUP_RAID5 |
3780 BTRFS_BLOCK_GROUP_RAID6))
3781 num_dev = root->fs_info->fs_devices->rw_devices;
3782 else if (type & BTRFS_BLOCK_GROUP_RAID1)
3785 num_dev = 1; /* DUP or single */
3787 /* metadata for updating devices and the chunk tree */
3788 return btrfs_calc_trans_metadata_size(root, num_dev + 1);
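
/*
 * Worked example (added commentary): allocating a RAID10 chunk on a
 * four-device filesystem may touch a device item per drive plus the
 * chunk tree, so the threshold is the metadata cost of
 * num_dev + 1 = 5 items.
 */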
3791 static void check_system_chunk(struct btrfs_trans_handle *trans,
3792 struct btrfs_root *root, u64 type)
3794 struct btrfs_space_info *info;
3798 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3799 spin_lock(&info->lock);
3800 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3801 info->bytes_reserved - info->bytes_readonly;
3802 spin_unlock(&info->lock);
3804 thresh = get_system_chunk_thresh(root, type);
3805 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3806 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3807 left, thresh, type);
3808 dump_space_info(info, 0, 0);
3811 if (left < thresh) {
3814 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3815 btrfs_alloc_chunk(trans, root, flags);
3819 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3820 struct btrfs_root *extent_root, u64 flags, int force)
3822 struct btrfs_space_info *space_info;
3823 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3824 int wait_for_alloc = 0;
3827 /* Don't re-enter if we're already allocating a chunk */
3828 if (trans->allocating_chunk)
3831 space_info = __find_space_info(extent_root->fs_info, flags);
3833 ret = update_space_info(extent_root->fs_info, flags,
3835 BUG_ON(ret); /* -ENOMEM */
3837 BUG_ON(!space_info); /* Logic error */
3840 spin_lock(&space_info->lock);
3841 if (force < space_info->force_alloc)
3842 force = space_info->force_alloc;
3843 if (space_info->full) {
3844 if (should_alloc_chunk(extent_root, space_info, force))
3848 spin_unlock(&space_info->lock);
3852 if (!should_alloc_chunk(extent_root, space_info, force)) {
3853 spin_unlock(&space_info->lock);
3855 } else if (space_info->chunk_alloc) {
3858 space_info->chunk_alloc = 1;
3861 spin_unlock(&space_info->lock);
3863 mutex_lock(&fs_info->chunk_mutex);
3866 * The chunk_mutex is held throughout the entirety of a chunk
3867 * allocation, so once we've acquired the chunk_mutex we know that the
3868 * other guy is done and we need to recheck and see if we should
3871 if (wait_for_alloc) {
3872 mutex_unlock(&fs_info->chunk_mutex);
3877 trans->allocating_chunk = true;
3880 * If we have mixed data/metadata chunks we want to make sure we keep
3881 * allocating mixed chunks instead of individual chunks.
3883 if (btrfs_mixed_space_info(space_info))
3884 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3887 * if we're doing a data chunk, go ahead and make sure that
3888 * we keep a reasonable number of metadata chunks allocated in the
3891 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3892 fs_info->data_chunk_allocations++;
3893 if (!(fs_info->data_chunk_allocations %
3894 fs_info->metadata_ratio))
3895 force_metadata_allocation(fs_info);
3899 * Check if we have enough space in SYSTEM chunk because we may need
3900 * to update devices.
3902 check_system_chunk(trans, extent_root, flags);
3904 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3905 trans->allocating_chunk = false;
3907 spin_lock(&space_info->lock);
3908 if (ret < 0 && ret != -ENOSPC)
3911 space_info->full = 1;
3915 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3917 space_info->chunk_alloc = 0;
3918 spin_unlock(&space_info->lock);
3919 mutex_unlock(&fs_info->chunk_mutex);
3923 static int can_overcommit(struct btrfs_root *root,
3924 struct btrfs_space_info *space_info, u64 bytes,
3925 enum btrfs_reserve_flush_enum flush)
3927 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3928 u64 profile = btrfs_get_alloc_profile(root, 0);
3933 used = space_info->bytes_used + space_info->bytes_reserved +
3934 space_info->bytes_pinned + space_info->bytes_readonly;
3937 * We only want to allow overcommitting if we have lots of actual space
3938 * free, but if we don't have enough space to handle the global reserve
3939 * space then we could end up having a real enospc problem when trying
3940 * to allocate a chunk or some other such important allocation.
3942 spin_lock(&global_rsv->lock);
3943 space_size = calc_global_rsv_need_space(global_rsv);
3944 spin_unlock(&global_rsv->lock);
3945 if (used + space_size >= space_info->total_bytes)
3948 used += space_info->bytes_may_use;
3950 spin_lock(&root->fs_info->free_chunk_lock);
3951 avail = root->fs_info->free_chunk_space;
3952 spin_unlock(&root->fs_info->free_chunk_lock);
3955 * If we have dup, raid1 or raid10 then only half of the free
3956 * space is actually usable. For raid56, the space info used
3957 * doesn't include the parity drive, so we don't have to
3960 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3961 BTRFS_BLOCK_GROUP_RAID1 |
3962 BTRFS_BLOCK_GROUP_RAID10))
3966 * If we aren't flushing all things, let us overcommit up to
3967 * half of the space. If we can flush, don't let us overcommit
3968 * too much; let it overcommit up to 1/8 of the space.
3970 if (flush == BTRFS_RESERVE_FLUSH_ALL)
3975 if (used + bytes < space_info->total_bytes + avail)
3980 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3981 unsigned long nr_pages)
3983 struct super_block *sb = root->fs_info->sb;
3985 if (down_read_trylock(&sb->s_umount)) {
3986 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3987 up_read(&sb->s_umount);
3990 * We needn't worry about the filesystem going from r/w to r/o even
3991 * though we don't acquire the ->s_umount mutex, because the filesystem
3992 * should guarantee that the delalloc inode list is empty after
3993 * the filesystem becomes read-only (all dirty pages are written to
3996 btrfs_start_all_delalloc_inodes(root->fs_info, 0);
3997 if (!current->journal_info)
3998 btrfs_wait_all_ordered_extents(root->fs_info);
4003 * shrink metadata reservation for delalloc
4005 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4008 struct btrfs_block_rsv *block_rsv;
4009 struct btrfs_space_info *space_info;
4010 struct btrfs_trans_handle *trans;
4014 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
4016 enum btrfs_reserve_flush_enum flush;
4018 trans = (struct btrfs_trans_handle *)current->journal_info;
4019 block_rsv = &root->fs_info->delalloc_block_rsv;
4020 space_info = block_rsv->space_info;
4023 delalloc_bytes = percpu_counter_sum_positive(
4024 &root->fs_info->delalloc_bytes);
4025 if (delalloc_bytes == 0) {
4028 btrfs_wait_all_ordered_extents(root->fs_info);
4032 while (delalloc_bytes && loops < 3) {
4033 max_reclaim = min(delalloc_bytes, to_reclaim);
4034 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4035 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4037 * We need to wait for the async pages to actually start before
4040 wait_event(root->fs_info->async_submit_wait,
4041 !atomic_read(&root->fs_info->async_delalloc_pages));
4044 flush = BTRFS_RESERVE_FLUSH_ALL;
4046 flush = BTRFS_RESERVE_NO_FLUSH;
4047 spin_lock(&space_info->lock);
4048 if (can_overcommit(root, space_info, orig, flush)) {
4049 spin_unlock(&space_info->lock);
4052 spin_unlock(&space_info->lock);
4055 if (wait_ordered && !trans) {
4056 btrfs_wait_all_ordered_extents(root->fs_info);
4058 time_left = schedule_timeout_killable(1);
4063 delalloc_bytes = percpu_counter_sum_positive(
4064 &root->fs_info->delalloc_bytes);
4069 * may_commit_transaction - possibly commit the transaction if it's ok to
4070 * @root - the root we're allocating for
4071 * @bytes - the number of bytes we want to reserve
4072 * @force - force the commit
4074 * This will check to make sure that committing the transaction will actually
4075 * get us somewhere and then commit the transaction if it does. Otherwise it
4076 * will return -ENOSPC.
4078 static int may_commit_transaction(struct btrfs_root *root,
4079 struct btrfs_space_info *space_info,
4080 u64 bytes, int force)
4082 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4083 struct btrfs_trans_handle *trans;
4085 trans = (struct btrfs_trans_handle *)current->journal_info;
4092 /* See if there is enough pinned space to make this reservation */
4093 spin_lock(&space_info->lock);
4094 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4096 spin_unlock(&space_info->lock);
4099 spin_unlock(&space_info->lock);
4102 * See if there is some space in the delayed insertion reservation for
4105 if (space_info != delayed_rsv->space_info)
4108 spin_lock(&space_info->lock);
4109 spin_lock(&delayed_rsv->lock);
4110 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4111 bytes - delayed_rsv->size) >= 0) {
4112 spin_unlock(&delayed_rsv->lock);
4113 spin_unlock(&space_info->lock);
4116 spin_unlock(&delayed_rsv->lock);
4117 spin_unlock(&space_info->lock);
4120 trans = btrfs_join_transaction(root);
4124 return btrfs_commit_transaction(trans, root);
4128 FLUSH_DELAYED_ITEMS_NR = 1,
4129 FLUSH_DELAYED_ITEMS = 2,
4131 FLUSH_DELALLOC_WAIT = 4,
4136 static int flush_space(struct btrfs_root *root,
4137 struct btrfs_space_info *space_info, u64 num_bytes,
4138 u64 orig_bytes, int state)
4140 struct btrfs_trans_handle *trans;
4145 case FLUSH_DELAYED_ITEMS_NR:
4146 case FLUSH_DELAYED_ITEMS:
4147 if (state == FLUSH_DELAYED_ITEMS_NR) {
4148 u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4150 nr = (int)div64_u64(num_bytes, bytes);
4157 trans = btrfs_join_transaction(root);
4158 if (IS_ERR(trans)) {
4159 ret = PTR_ERR(trans);
4162 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4163 btrfs_end_transaction(trans, root);
4165 case FLUSH_DELALLOC:
4166 case FLUSH_DELALLOC_WAIT:
4167 shrink_delalloc(root, num_bytes, orig_bytes,
4168 state == FLUSH_DELALLOC_WAIT);
4171 trans = btrfs_join_transaction(root);
4172 if (IS_ERR(trans)) {
4173 ret = PTR_ERR(trans);
4176 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4177 btrfs_get_alloc_profile(root, 0),
4178 CHUNK_ALLOC_NO_FORCE);
4179 btrfs_end_transaction(trans, root);
4184 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4194 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4195 * @root - the root we're allocating for
4196 * @block_rsv - the block_rsv we're allocating for
4197 * @orig_bytes - the number of bytes we want
4198 * @flush - whether or not we can flush to make our reservation
4200 * This will reserve orig_bytes number of bytes from the space info associated
4201 * with the block_rsv. If there is not enough space it will make an attempt to
4202 * flush out space to make room. It will do this by flushing delalloc if
4203 * possible or committing the transaction. If flush is 0 then no attempts to
4204 * regain reservations will be made and this will fail if there is not enough
4207 static int reserve_metadata_bytes(struct btrfs_root *root,
4208 struct btrfs_block_rsv *block_rsv,
4210 enum btrfs_reserve_flush_enum flush)
4212 struct btrfs_space_info *space_info = block_rsv->space_info;
4214 u64 num_bytes = orig_bytes;
4215 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4217 bool flushing = false;
4221 spin_lock(&space_info->lock);
4223 * We only want to wait if somebody other than us is flushing and we
4224 * are actually allowed to flush all things.
4226 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4227 space_info->flush) {
4228 spin_unlock(&space_info->lock);
4230 * If we have a trans handle we can't wait because the flusher
4231 * may have to commit the transaction, which would mean we would
4232 * deadlock since we are waiting for the flusher to finish, but
4233 * hold the current transaction open.
4235 if (current->journal_info)
4237 ret = wait_event_killable(space_info->wait, !space_info->flush);
4238 /* Must have been killed, return */
4242 spin_lock(&space_info->lock);
4246 used = space_info->bytes_used + space_info->bytes_reserved +
4247 space_info->bytes_pinned + space_info->bytes_readonly +
4248 space_info->bytes_may_use;
4251 * The idea here is that if we haven't over-reserved the block group
4252 * then we can go ahead and save our reservation first and then start
4253 * flushing if we need to. Otherwise, if we've already overcommitted,
4254 * let's start flushing stuff first and then come back and try to make
4257 if (used <= space_info->total_bytes) {
4258 if (used + orig_bytes <= space_info->total_bytes) {
4259 space_info->bytes_may_use += orig_bytes;
4260 trace_btrfs_space_reservation(root->fs_info,
4261 "space_info", space_info->flags, orig_bytes, 1);
4265 * Ok set num_bytes to orig_bytes since we aren't
4266 * overcommitted, this way we only try and reclaim what
4269 num_bytes = orig_bytes;
4273 * Ok we're over committed, set num_bytes to the overcommitted
4274 * amount plus the amount of bytes that we need for this
4277 num_bytes = used - space_info->total_bytes +
4281 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4282 space_info->bytes_may_use += orig_bytes;
4283 trace_btrfs_space_reservation(root->fs_info, "space_info",
4284 space_info->flags, orig_bytes,
4290 * Couldn't make our reservation, save our place so while we're trying
4291 * to reclaim space we can actually use it instead of somebody else
4292 * stealing it from us.
4294 * We make the other tasks wait for the flush only when we can flush
4297 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4299 space_info->flush = 1;
4302 spin_unlock(&space_info->lock);
4304 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4307 ret = flush_space(root, space_info, num_bytes, orig_bytes,
4312 * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
4313 * could happen. So skip the delalloc flush.
4315 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4316 (flush_state == FLUSH_DELALLOC ||
4317 flush_state == FLUSH_DELALLOC_WAIT))
4318 flush_state = ALLOC_CHUNK;
4322 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4323 flush_state < COMMIT_TRANS)
4325 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4326 flush_state <= COMMIT_TRANS)
4330 if (ret == -ENOSPC &&
4331 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4332 struct btrfs_block_rsv *global_rsv =
4333 &root->fs_info->global_block_rsv;
4335 if (block_rsv != global_rsv &&
4336 !block_rsv_use_bytes(global_rsv, orig_bytes))
4340 spin_lock(&space_info->lock);
4341 space_info->flush = 0;
4342 wake_up_all(&space_info->wait);
4343 spin_unlock(&space_info->lock);
4348 static struct btrfs_block_rsv *get_block_rsv(
4349 const struct btrfs_trans_handle *trans,
4350 const struct btrfs_root *root)
4352 struct btrfs_block_rsv *block_rsv = NULL;
4355 block_rsv = trans->block_rsv;
4357 if (root == root->fs_info->csum_root && trans->adding_csums)
4358 block_rsv = trans->block_rsv;
4360 if (root == root->fs_info->uuid_root)
4361 block_rsv = trans->block_rsv;
4364 block_rsv = root->block_rsv;
4367 block_rsv = &root->fs_info->empty_block_rsv;
4372 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4376 spin_lock(&block_rsv->lock);
4377 if (block_rsv->reserved >= num_bytes) {
4378 block_rsv->reserved -= num_bytes;
4379 if (block_rsv->reserved < block_rsv->size)
4380 block_rsv->full = 0;
4383 spin_unlock(&block_rsv->lock);
4387 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4388 u64 num_bytes, int update_size)
4390 spin_lock(&block_rsv->lock);
4391 block_rsv->reserved += num_bytes;
4393 block_rsv->size += num_bytes;
4394 else if (block_rsv->reserved >= block_rsv->size)
4395 block_rsv->full = 1;
4396 spin_unlock(&block_rsv->lock);
4399 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4400 struct btrfs_block_rsv *dest, u64 num_bytes,
4403 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4406 if (global_rsv->space_info != dest->space_info)
4409 spin_lock(&global_rsv->lock);
4410 min_bytes = div_factor(global_rsv->size, min_factor);
4411 if (global_rsv->reserved < min_bytes + num_bytes) {
4412 spin_unlock(&global_rsv->lock);
4415 global_rsv->reserved -= num_bytes;
4416 if (global_rsv->reserved < global_rsv->size)
4417 global_rsv->full = 0;
4418 spin_unlock(&global_rsv->lock);
4420 block_rsv_add_bytes(dest, num_bytes, 1);
4424 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4425 struct btrfs_block_rsv *block_rsv,
4426 struct btrfs_block_rsv *dest, u64 num_bytes)
4428 struct btrfs_space_info *space_info = block_rsv->space_info;
4430 spin_lock(&block_rsv->lock);
4431 if (num_bytes == (u64)-1)
4432 num_bytes = block_rsv->size;
4433 block_rsv->size -= num_bytes;
4434 if (block_rsv->reserved >= block_rsv->size) {
4435 num_bytes = block_rsv->reserved - block_rsv->size;
4436 block_rsv->reserved = block_rsv->size;
4437 block_rsv->full = 1;
4441 spin_unlock(&block_rsv->lock);
4443 if (num_bytes > 0) {
4445 spin_lock(&dest->lock);
4449 bytes_to_add = dest->size - dest->reserved;
4450 bytes_to_add = min(num_bytes, bytes_to_add);
4451 dest->reserved += bytes_to_add;
4452 if (dest->reserved >= dest->size)
4454 num_bytes -= bytes_to_add;
4456 spin_unlock(&dest->lock);
4459 spin_lock(&space_info->lock);
4460 space_info->bytes_may_use -= num_bytes;
4461 trace_btrfs_space_reservation(fs_info, "space_info",
4462 space_info->flags, num_bytes, 0);
4463 spin_unlock(&space_info->lock);
4468 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4469 struct btrfs_block_rsv *dst, u64 num_bytes)
4473 ret = block_rsv_use_bytes(src, num_bytes);
4477 block_rsv_add_bytes(dst, num_bytes, 1);
4481 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4483 memset(rsv, 0, sizeof(*rsv));
4484 spin_lock_init(&rsv->lock);
4488 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4489 unsigned short type)
4491 struct btrfs_block_rsv *block_rsv;
4492 struct btrfs_fs_info *fs_info = root->fs_info;
4494 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4498 btrfs_init_block_rsv(block_rsv, type);
4499 block_rsv->space_info = __find_space_info(fs_info,
4500 BTRFS_BLOCK_GROUP_METADATA);
4504 void btrfs_free_block_rsv(struct btrfs_root *root,
4505 struct btrfs_block_rsv *rsv)
4509 btrfs_block_rsv_release(root, rsv, (u64)-1);
4513 int btrfs_block_rsv_add(struct btrfs_root *root,
4514 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4515 enum btrfs_reserve_flush_enum flush)
4522 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4524 block_rsv_add_bytes(block_rsv, num_bytes, 1);
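
/*
 * Illustrative lifecycle (added commentary; the helper is hypothetical
 * and BTRFS_BLOCK_RSV_TEMP is assumed from the block rsv type
 * constants): allocate a private rsv, fund it with one item's worth of
 * metadata, and let btrfs_free_block_rsv() return whatever is left.
 */
static inline int example_temp_reservation(struct btrfs_root *root)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	ret = btrfs_block_rsv_add(root, rsv,
				  btrfs_calc_trans_metadata_size(root, 1),
				  BTRFS_RESERVE_FLUSH_ALL);

	/* ... consume the reservation on success ... */

	btrfs_free_block_rsv(root, rsv);
	return ret;
}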
4531 int btrfs_block_rsv_check(struct btrfs_root *root,
4532 struct btrfs_block_rsv *block_rsv, int min_factor)
4540 spin_lock(&block_rsv->lock);
4541 num_bytes = div_factor(block_rsv->size, min_factor);
4542 if (block_rsv->reserved >= num_bytes)
4544 spin_unlock(&block_rsv->lock);
4549 int btrfs_block_rsv_refill(struct btrfs_root *root,
4550 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4551 enum btrfs_reserve_flush_enum flush)
4559 spin_lock(&block_rsv->lock);
4560 num_bytes = min_reserved;
4561 if (block_rsv->reserved >= num_bytes)
4564 num_bytes -= block_rsv->reserved;
4565 spin_unlock(&block_rsv->lock);
4570 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4572 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4579 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4580 struct btrfs_block_rsv *dst_rsv,
4583 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4586 void btrfs_block_rsv_release(struct btrfs_root *root,
4587 struct btrfs_block_rsv *block_rsv,
4590 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4591 if (global_rsv->full || global_rsv == block_rsv ||
4592 block_rsv->space_info != global_rsv->space_info)
4594 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4599 * helper to calculate size of global block reservation.
4600 * the desired value is sum of space used by extent tree,
4601 * checksum tree and root tree
4603 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4605 struct btrfs_space_info *sinfo;
4609 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4611 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4612 spin_lock(&sinfo->lock);
4613 data_used = sinfo->bytes_used;
4614 spin_unlock(&sinfo->lock);
4616 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4617 spin_lock(&sinfo->lock);
4618 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4620 meta_used = sinfo->bytes_used;
4621 spin_unlock(&sinfo->lock);
4623 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4625 num_bytes += div64_u64(data_used + meta_used, 50);
4627 if (num_bytes * 3 > meta_used)
4628 num_bytes = div64_u64(meta_used, 3);
4630 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4633 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4635 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4636 struct btrfs_space_info *sinfo = block_rsv->space_info;
4639 num_bytes = calc_global_metadata_size(fs_info);
4641 spin_lock(&sinfo->lock);
4642 spin_lock(&block_rsv->lock);
4644 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4646 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4647 sinfo->bytes_reserved + sinfo->bytes_readonly +
4648 sinfo->bytes_may_use;
4650 if (sinfo->total_bytes > num_bytes) {
4651 num_bytes = sinfo->total_bytes - num_bytes;
4652 block_rsv->reserved += num_bytes;
4653 sinfo->bytes_may_use += num_bytes;
4654 trace_btrfs_space_reservation(fs_info, "space_info",
4655 sinfo->flags, num_bytes, 1);
4658 if (block_rsv->reserved >= block_rsv->size) {
4659 num_bytes = block_rsv->reserved - block_rsv->size;
4660 sinfo->bytes_may_use -= num_bytes;
4661 trace_btrfs_space_reservation(fs_info, "space_info",
4662 sinfo->flags, num_bytes, 0);
4663 block_rsv->reserved = block_rsv->size;
4664 block_rsv->full = 1;
4667 spin_unlock(&block_rsv->lock);
4668 spin_unlock(&sinfo->lock);
4671 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4673 struct btrfs_space_info *space_info;
4675 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4676 fs_info->chunk_block_rsv.space_info = space_info;
4678 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4679 fs_info->global_block_rsv.space_info = space_info;
4680 fs_info->delalloc_block_rsv.space_info = space_info;
4681 fs_info->trans_block_rsv.space_info = space_info;
4682 fs_info->empty_block_rsv.space_info = space_info;
4683 fs_info->delayed_block_rsv.space_info = space_info;
4685 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4686 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4687 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4688 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4689 if (fs_info->quota_root)
4690 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4691 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4693 update_global_block_rsv(fs_info);
4696 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4698 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4700 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4701 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4702 WARN_ON(fs_info->trans_block_rsv.size > 0);
4703 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4704 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4705 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4706 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4707 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4710 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4711 struct btrfs_root *root)
4713 if (!trans->block_rsv)
4716 if (!trans->bytes_reserved)
4719 trace_btrfs_space_reservation(root->fs_info, "transaction",
4720 trans->transid, trans->bytes_reserved, 0);
4721 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4722 trans->bytes_reserved = 0;
4725 /* Can only return 0 or -ENOSPC */
4726 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4727 struct inode *inode)
4729 struct btrfs_root *root = BTRFS_I(inode)->root;
4730 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4731 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4734 * We need to hold space in order to delete our orphan item once we've
4735 * added it, so this takes the reservation so we can release it later
4736 * when we are truly done with the orphan item.
4738 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4739 trace_btrfs_space_reservation(root->fs_info, "orphan",
4740 btrfs_ino(inode), num_bytes, 1);
4741 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4744 void btrfs_orphan_release_metadata(struct inode *inode)
4746 struct btrfs_root *root = BTRFS_I(inode)->root;
4747 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4748 trace_btrfs_space_reservation(root->fs_info, "orphan",
4749 btrfs_ino(inode), num_bytes, 0);
4750 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4754 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4755 * root: the root of the parent directory
4756 * rsv: block reservation
4757 * items: the number of items that we need to reserve
4758 * qgroup_reserved: used to return the reserved size in qgroup
4760 * This function is used to reserve the space for snapshot/subvolume
4761 * creation and deletion. Those operations are different from the
4762 * common file/directory operations: they change two fs/file trees
4763 * and the root tree, and the number of items that the qgroup reserves
4764 * is different from the free space reservation. So we can not use
4765 * the space reservation mechanism in start_transaction().
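/*
 * Editor's note -- a hypothetical walk-through of the sizing done below
 * (numbers are illustrative only): with quotas enabled and a 16K
 * leafsize, the qgroup charge is 3 * 16K = 48K (one leaf for the parent
 * inode, two for the dir entries). The block reservation itself is
 * btrfs_calc_trans_metadata_size(root, items), i.e. enough metadata
 * space for 'items' tree items, and may fall back to the global reserve
 * on -ENOSPC when use_global_rsv is set.
 */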
4767 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4768 struct btrfs_block_rsv *rsv,
4769 int items,
4770 u64 *qgroup_reserved,
4771 bool use_global_rsv)
4775 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4777 if (root->fs_info->quota_enabled) {
4778 /* One for parent inode, two for dir entries */
4779 num_bytes = 3 * root->leafsize;
4780 ret = btrfs_qgroup_reserve(root, num_bytes);
4787 *qgroup_reserved = num_bytes;
4789 num_bytes = btrfs_calc_trans_metadata_size(root, items);
4790 rsv->space_info = __find_space_info(root->fs_info,
4791 BTRFS_BLOCK_GROUP_METADATA);
4792 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4793 BTRFS_RESERVE_FLUSH_ALL);
4795 if (ret == -ENOSPC && use_global_rsv)
4796 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4799 if (*qgroup_reserved)
4800 btrfs_qgroup_free(root, *qgroup_reserved);
4806 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4807 struct btrfs_block_rsv *rsv,
4808 u64 qgroup_reserved)
4810 btrfs_block_rsv_release(root, rsv, (u64)-1);
4811 if (qgroup_reserved)
4812 btrfs_qgroup_free(root, qgroup_reserved);
4816 * drop_outstanding_extent - drop an outstanding extent
4817 * @inode: the inode we're dropping the extent for
4819 * This is called when we are freeing up an outstanding extent, either
4820 * after an error or after an extent is written. This will return the number of
4821 * reserved extents that need to be freed. This must be called with
4822 * BTRFS_I(inode)->lock held.
4824 static unsigned drop_outstanding_extent(struct inode *inode)
4826 unsigned drop_inode_space = 0;
4827 unsigned dropped_extents = 0;
4829 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4830 BTRFS_I(inode)->outstanding_extents--;
4832 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4833 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4834 &BTRFS_I(inode)->runtime_flags))
4835 drop_inode_space = 1;
4838 * If we have at least as many outstanding extents as we have
4839 * reserved then we need to leave the reserved extents count alone.
4841 if (BTRFS_I(inode)->outstanding_extents >=
4842 BTRFS_I(inode)->reserved_extents)
4843 return drop_inode_space;
4845 dropped_extents = BTRFS_I(inode)->reserved_extents -
4846 BTRFS_I(inode)->outstanding_extents;
4847 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4848 return dropped_extents + drop_inode_space;
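/*
 * Editor's note -- a worked example of the return value above, with
 * hypothetical counts: if outstanding_extents drops from 3 to 2 while
 * reserved_extents is 5, we return 5 - 2 = 3 reservations to free
 * (plus one more if the DELALLOC_META_RESERVED bit was cleared, since
 * that bit holds an extra item for the inode update).
 */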
4852 * calc_csum_metadata_size - return the amount of metadata space that must be
4853 * reserved/freed for the given bytes.
4854 * @inode: the inode we're manipulating
4855 * @num_bytes: the number of bytes in question
4856 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4858 * This adjusts the number of csum_bytes in the inode and then returns the
4859 * correct amount of metadata that must either be reserved or freed. We
4860 * calculate how many checksums we can fit into one leaf and then divide the
4861 * number of bytes that will need to be checksummed by this value to figure out
4862 * how many checksums will be required. If we are adding bytes then the number
4863 * may go up and we will return the number of additional bytes that must be
4864 * reserved. If it is going down we will return the number of bytes that must
4865 * be freed.
4867 * This must be called with BTRFS_I(inode)->lock held.
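/*
 * Editor's note -- an illustrative run of the math below (all numbers
 * assumed): with a 4K sectorsize, csums for 1MB of data means 256
 * sector csums. If num_csums_per_leaf works out to 100, that is
 * ceil(256 / 100) = 3 leaves; growing csum_bytes to 2MB (512 csums,
 * 6 leaves) makes this return btrfs_calc_trans_metadata_size(root, 3),
 * the cost of the three additional leaves.
 */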
4869 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4872 struct btrfs_root *root = BTRFS_I(inode)->root;
4874 int num_csums_per_leaf;
4878 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4879 BTRFS_I(inode)->csum_bytes == 0)
4882 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4884 BTRFS_I(inode)->csum_bytes += num_bytes;
4886 BTRFS_I(inode)->csum_bytes -= num_bytes;
4887 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4888 num_csums_per_leaf = (int)div64_u64(csum_size,
4889 sizeof(struct btrfs_csum_item) +
4890 sizeof(struct btrfs_disk_key));
4891 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4892 num_csums = num_csums + num_csums_per_leaf - 1;
4893 num_csums = num_csums / num_csums_per_leaf;
4895 old_csums = old_csums + num_csums_per_leaf - 1;
4896 old_csums = old_csums / num_csums_per_leaf;
4898 /* No change, no need to reserve more */
4899 if (old_csums == num_csums)
4903 return btrfs_calc_trans_metadata_size(root,
4904 num_csums - old_csums);
4906 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4909 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4911 struct btrfs_root *root = BTRFS_I(inode)->root;
4912 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4915 unsigned nr_extents = 0;
4916 int extra_reserve = 0;
4917 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4919 bool delalloc_lock = true;
4923 /* If we are a free space inode we need to not flush since we will be in
4924 * the middle of a transaction commit. We also don't need the delalloc
4925 * mutex since we won't race with anybody. We need this mostly to make
4926 * lockdep shut its filthy mouth.
4928 if (btrfs_is_free_space_inode(inode)) {
4929 flush = BTRFS_RESERVE_NO_FLUSH;
4930 delalloc_lock = false;
4933 if (flush != BTRFS_RESERVE_NO_FLUSH &&
4934 btrfs_transaction_in_commit(root->fs_info))
4935 schedule_timeout(1);
4938 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4940 num_bytes = ALIGN(num_bytes, root->sectorsize);
4942 spin_lock(&BTRFS_I(inode)->lock);
4943 BTRFS_I(inode)->outstanding_extents++;
4945 if (BTRFS_I(inode)->outstanding_extents >
4946 BTRFS_I(inode)->reserved_extents)
4947 nr_extents = BTRFS_I(inode)->outstanding_extents -
4948 BTRFS_I(inode)->reserved_extents;
4951 * Add an item to reserve for updating the inode when we complete the
4952 * extent we are going to write.
4954 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4955 &BTRFS_I(inode)->runtime_flags)) {
4956 nr_extents++;
4957 extra_reserve = 1;
4958 }
4960 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4961 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4962 csum_bytes = BTRFS_I(inode)->csum_bytes;
4963 spin_unlock(&BTRFS_I(inode)->lock);
4965 if (root->fs_info->quota_enabled) {
4966 ret = btrfs_qgroup_reserve(root, num_bytes +
4967 nr_extents * root->leafsize);
4972 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4973 if (unlikely(ret)) {
4974 if (root->fs_info->quota_enabled)
4975 btrfs_qgroup_free(root, num_bytes +
4976 nr_extents * root->leafsize);
4980 spin_lock(&BTRFS_I(inode)->lock);
4981 if (extra_reserve) {
4982 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4983 &BTRFS_I(inode)->runtime_flags);
4986 BTRFS_I(inode)->reserved_extents += nr_extents;
4987 spin_unlock(&BTRFS_I(inode)->lock);
4990 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4993 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4994 btrfs_ino(inode), to_reserve, 1);
4995 block_rsv_add_bytes(block_rsv, to_reserve, 1);
5000 spin_lock(&BTRFS_I(inode)->lock);
5001 dropped = drop_outstanding_extent(inode);
5003 * If the inode's csum_bytes is the same as the original
5004 * csum_bytes then we know we haven't raced with any free()ers
5005 * so we can just reduce our inode's csum bytes and carry on.
5007 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5008 calc_csum_metadata_size(inode, num_bytes, 0);
5010 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5014 * This is tricky, but first we need to figure out how much we
5015 * freed from any free-ers that occurred during this
5016 * reservation, so we reset ->csum_bytes to the csum_bytes
5017 * before we dropped our lock, and then call the free for the
5018 * number of bytes that were freed while we were trying our
5019 * reservation.
5021 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5022 BTRFS_I(inode)->csum_bytes = csum_bytes;
5023 to_free = calc_csum_metadata_size(inode, bytes, 0);
5027 * Now we need to see how much we would have freed had we not
5028 * been making this reservation and our ->csum_bytes were not
5029 * artificially inflated.
5031 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5032 bytes = csum_bytes - orig_csum_bytes;
5033 bytes = calc_csum_metadata_size(inode, bytes, 0);
5036 * Now reset ->csum_bytes to what it should be. If bytes is
5037 * more than to_free then we would have freed more space had we
5038 * not had an artificially high ->csum_bytes, so we need to free
5039 * the remainder. If bytes is the same or less then we don't
5040 * need to do anything, the other free-ers did the correct
5041 * thing.
5043 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5044 if (bytes > to_free)
5045 to_free = bytes - to_free;
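/*
 * Editor's note -- the gist of the arithmetic above, with made-up
 * numbers: we inflated csum_bytes by a failed 64K reservation, and
 * racing freers shrank it by 32K in the meantime. First we free the
 * metadata cost of that 32K as seen with the inflated csum_bytes;
 * then we recompute the cost of the same 32K against the uninflated
 * value, and if that would have freed more, the difference is freed
 * as well, so the end state matches one where our reservation never
 * happened.
 */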
5049 spin_unlock(&BTRFS_I(inode)->lock);
5051 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5054 btrfs_block_rsv_release(root, block_rsv, to_free);
5055 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5056 btrfs_ino(inode), to_free, 0);
5059 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5064 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5065 * @inode: the inode to release the reservation for
5066 * @num_bytes: the number of bytes we're releasing
5068 * This will release the metadata reservation for an inode. This can be called
5069 * once we complete IO for a given set of bytes to release their metadata
5070 * reservations.
5072 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5074 struct btrfs_root *root = BTRFS_I(inode)->root;
5078 num_bytes = ALIGN(num_bytes, root->sectorsize);
5079 spin_lock(&BTRFS_I(inode)->lock);
5080 dropped = drop_outstanding_extent(inode);
5083 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5084 spin_unlock(&BTRFS_I(inode)->lock);
5086 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5088 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5089 btrfs_ino(inode), to_free, 0);
5090 if (root->fs_info->quota_enabled) {
5091 btrfs_qgroup_free(root, num_bytes +
5092 dropped * root->leafsize);
5095 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5100 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5101 * @inode: inode we're writing to
5102 * @num_bytes: the number of bytes we want to allocate
5104 * This will do the following things
5106 * o reserve space in the data space info for num_bytes
5107 * o reserve space in the metadata space info based on number of outstanding
5108 * extents and how much csums will be needed
5109 * o add to the inodes ->delalloc_bytes
5110 * o add it to the fs_info's delalloc inodes list.
5112 * This will return 0 for success and -ENOSPC if there is no space left.
5114 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5118 ret = btrfs_check_data_free_space(inode, num_bytes);
5122 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5124 btrfs_free_reserved_data_space(inode, num_bytes);
5132 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5133 * @inode: inode we're releasing space for
5134 * @num_bytes: the number of bytes we want to free up
5136 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5137 * called in the case that we don't need the metadata AND data reservations
5138 * anymore, for example if there is an error or we insert an inline extent.
5140 * This function will release the metadata space that was not used and will
5141 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5142 * list if there are no delalloc bytes left.
5144 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5146 btrfs_delalloc_release_metadata(inode, num_bytes);
5147 btrfs_free_reserved_data_space(inode, num_bytes);
5150 static int update_block_group(struct btrfs_root *root,
5151 u64 bytenr, u64 num_bytes, int alloc)
5153 struct btrfs_block_group_cache *cache = NULL;
5154 struct btrfs_fs_info *info = root->fs_info;
5155 u64 total = num_bytes;
5160 /* block accounting for super block */
5161 spin_lock(&info->delalloc_root_lock);
5162 old_val = btrfs_super_bytes_used(info->super_copy);
5164 old_val += num_bytes;
5166 old_val -= num_bytes;
5167 btrfs_set_super_bytes_used(info->super_copy, old_val);
5168 spin_unlock(&info->delalloc_root_lock);
5171 cache = btrfs_lookup_block_group(info, bytenr);
5174 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5175 BTRFS_BLOCK_GROUP_RAID1 |
5176 BTRFS_BLOCK_GROUP_RAID10))
5177 factor = 2;
5178 else
5179 factor = 1;
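/*
 * Editor's note (added commentary): DUP/RAID1/RAID10 store every byte
 * twice, so with factor = 2 the disk_used counters below move by two
 * bytes for every logical byte; other profiles move 1:1. RAID5/6
 * parity overhead is not captured by this factor.
 */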
5181 * If this block group has free space cache written out, we
5182 * need to make sure to load it if we are removing space. This
5183 * is because we need the unpinning stage to actually add the
5184 * space back to the block group, otherwise we will leak space.
5186 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5187 cache_block_group(cache, 1);
5189 byte_in_group = bytenr - cache->key.objectid;
5190 WARN_ON(byte_in_group > cache->key.offset);
5192 spin_lock(&cache->space_info->lock);
5193 spin_lock(&cache->lock);
5195 if (btrfs_test_opt(root, SPACE_CACHE) &&
5196 cache->disk_cache_state < BTRFS_DC_CLEAR)
5197 cache->disk_cache_state = BTRFS_DC_CLEAR;
5200 old_val = btrfs_block_group_used(&cache->item);
5201 num_bytes = min(total, cache->key.offset - byte_in_group);
5203 old_val += num_bytes;
5204 btrfs_set_block_group_used(&cache->item, old_val);
5205 cache->reserved -= num_bytes;
5206 cache->space_info->bytes_reserved -= num_bytes;
5207 cache->space_info->bytes_used += num_bytes;
5208 cache->space_info->disk_used += num_bytes * factor;
5209 spin_unlock(&cache->lock);
5210 spin_unlock(&cache->space_info->lock);
5212 old_val -= num_bytes;
5213 btrfs_set_block_group_used(&cache->item, old_val);
5214 cache->pinned += num_bytes;
5215 cache->space_info->bytes_pinned += num_bytes;
5216 cache->space_info->bytes_used -= num_bytes;
5217 cache->space_info->disk_used -= num_bytes * factor;
5218 spin_unlock(&cache->lock);
5219 spin_unlock(&cache->space_info->lock);
5221 set_extent_dirty(info->pinned_extents,
5222 bytenr, bytenr + num_bytes - 1,
5223 GFP_NOFS | __GFP_NOFAIL);
5225 btrfs_put_block_group(cache);
5227 bytenr += num_bytes;
5232 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5234 struct btrfs_block_group_cache *cache;
5237 spin_lock(&root->fs_info->block_group_cache_lock);
5238 bytenr = root->fs_info->first_logical_byte;
5239 spin_unlock(&root->fs_info->block_group_cache_lock);
5241 if (bytenr < (u64)-1)
5244 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5248 bytenr = cache->key.objectid;
5249 btrfs_put_block_group(cache);
5254 static int pin_down_extent(struct btrfs_root *root,
5255 struct btrfs_block_group_cache *cache,
5256 u64 bytenr, u64 num_bytes, int reserved)
5258 spin_lock(&cache->space_info->lock);
5259 spin_lock(&cache->lock);
5260 cache->pinned += num_bytes;
5261 cache->space_info->bytes_pinned += num_bytes;
5263 cache->reserved -= num_bytes;
5264 cache->space_info->bytes_reserved -= num_bytes;
5266 spin_unlock(&cache->lock);
5267 spin_unlock(&cache->space_info->lock);
5269 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5270 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5272 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5277 * this function must be called within a transaction
5279 int btrfs_pin_extent(struct btrfs_root *root,
5280 u64 bytenr, u64 num_bytes, int reserved)
5282 struct btrfs_block_group_cache *cache;
5284 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5285 BUG_ON(!cache); /* Logic error */
5287 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5289 btrfs_put_block_group(cache);
5294 * this function must be called within a transaction
5296 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5297 u64 bytenr, u64 num_bytes)
5299 struct btrfs_block_group_cache *cache;
5302 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5307 * pull in the free space cache (if any) so that our pin
5308 * removes the free space from the cache. We have load_only set
5309 * to one because the slow code to read in the free extents does check
5310 * the pinned extents.
5312 cache_block_group(cache, 1);
5314 pin_down_extent(root, cache, bytenr, num_bytes, 0);
5316 /* remove us from the free space cache (if we're there at all) */
5317 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5318 btrfs_put_block_group(cache);
5322 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5325 struct btrfs_block_group_cache *block_group;
5326 struct btrfs_caching_control *caching_ctl;
5328 block_group = btrfs_lookup_block_group(root->fs_info, start);
5332 cache_block_group(block_group, 0);
5333 caching_ctl = get_caching_control(block_group);
5337 BUG_ON(!block_group_cache_done(block_group));
5338 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5340 mutex_lock(&caching_ctl->mutex);
5342 if (start >= caching_ctl->progress) {
5343 ret = add_excluded_extent(root, start, num_bytes);
5344 } else if (start + num_bytes <= caching_ctl->progress) {
5345 ret = btrfs_remove_free_space(block_group,
5348 num_bytes = caching_ctl->progress - start;
5349 ret = btrfs_remove_free_space(block_group,
5354 num_bytes = (start + num_bytes) -
5355 caching_ctl->progress;
5356 start = caching_ctl->progress;
5357 ret = add_excluded_extent(root, start, num_bytes);
5360 mutex_unlock(&caching_ctl->mutex);
5361 put_caching_control(caching_ctl);
5363 btrfs_put_block_group(block_group);
5367 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5368 struct extent_buffer *eb)
5370 struct btrfs_file_extent_item *item;
5371 struct btrfs_key key;
5375 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5378 for (i = 0; i < btrfs_header_nritems(eb); i++) {
5379 btrfs_item_key_to_cpu(eb, &key, i);
5380 if (key.type != BTRFS_EXTENT_DATA_KEY)
5382 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5383 found_type = btrfs_file_extent_type(eb, item);
5384 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5386 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5388 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5389 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5390 __exclude_logged_extent(log, key.objectid, key.offset);
5397 * btrfs_update_reserved_bytes - update the block_group and space info counters
5398 * @cache: The cache we are manipulating
5399 * @num_bytes: The number of bytes in question
5400 * @reserve: One of the reservation enums
5402 * This is called by the allocator when it reserves space, or by somebody who is
5403 * freeing space that was never actually used on disk. For example if you
5404 * reserve some space for a new leaf in transaction A and before transaction A
5405 * commits you free that leaf, you call this with reserve set to 0 in order to
5406 * clear the reservation.
5408 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5409 * ENOSPC accounting. For data we handle the reservation through clearing the
5410 * delalloc bits in the io_tree. We have to do this since we could end up
5411 * allocating less disk space for the amount of data we have reserved in the
5412 * case of compression.
5414 * If this is a reservation and the block group has become read only we cannot
5415 * make the reservation and return -EAGAIN, otherwise this function always
5416 * succeeds.
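/*
 * Editor's note -- an illustrative state transition (values assumed):
 * reserving 16K of metadata with RESERVE_ALLOC moves 16K from
 * space_info->bytes_may_use into cache->reserved and
 * space_info->bytes_reserved; a later RESERVE_FREE of the same 16K
 * moves it back out of the reserved counters (and into bytes_readonly
 * if the block group went read-only in the meantime).
 */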
5418 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5419 u64 num_bytes, int reserve)
5421 struct btrfs_space_info *space_info = cache->space_info;
5424 spin_lock(&space_info->lock);
5425 spin_lock(&cache->lock);
5426 if (reserve != RESERVE_FREE) {
5430 cache->reserved += num_bytes;
5431 space_info->bytes_reserved += num_bytes;
5432 if (reserve == RESERVE_ALLOC) {
5433 trace_btrfs_space_reservation(cache->fs_info,
5434 "space_info", space_info->flags,
5436 space_info->bytes_may_use -= num_bytes;
5441 space_info->bytes_readonly += num_bytes;
5442 cache->reserved -= num_bytes;
5443 space_info->bytes_reserved -= num_bytes;
5445 spin_unlock(&cache->lock);
5446 spin_unlock(&space_info->lock);
5450 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5451 struct btrfs_root *root)
5453 struct btrfs_fs_info *fs_info = root->fs_info;
5454 struct btrfs_caching_control *next;
5455 struct btrfs_caching_control *caching_ctl;
5456 struct btrfs_block_group_cache *cache;
5457 struct btrfs_space_info *space_info;
5459 down_write(&fs_info->extent_commit_sem);
5461 list_for_each_entry_safe(caching_ctl, next,
5462 &fs_info->caching_block_groups, list) {
5463 cache = caching_ctl->block_group;
5464 if (block_group_cache_done(cache)) {
5465 cache->last_byte_to_unpin = (u64)-1;
5466 list_del_init(&caching_ctl->list);
5467 put_caching_control(caching_ctl);
5469 cache->last_byte_to_unpin = caching_ctl->progress;
5473 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5474 fs_info->pinned_extents = &fs_info->freed_extents[1];
5476 fs_info->pinned_extents = &fs_info->freed_extents[0];
5478 up_write(&fs_info->extent_commit_sem);
5480 list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5481 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5483 update_global_block_rsv(fs_info);
5486 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5488 struct btrfs_fs_info *fs_info = root->fs_info;
5489 struct btrfs_block_group_cache *cache = NULL;
5490 struct btrfs_space_info *space_info;
5491 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5495 while (start <= end) {
5498 start >= cache->key.objectid + cache->key.offset) {
5500 btrfs_put_block_group(cache);
5501 cache = btrfs_lookup_block_group(fs_info, start);
5502 BUG_ON(!cache); /* Logic error */
5505 len = cache->key.objectid + cache->key.offset - start;
5506 len = min(len, end + 1 - start);
5508 if (start < cache->last_byte_to_unpin) {
5509 len = min(len, cache->last_byte_to_unpin - start);
5510 btrfs_add_free_space(cache, start, len);
5514 space_info = cache->space_info;
5516 spin_lock(&space_info->lock);
5517 spin_lock(&cache->lock);
5518 cache->pinned -= len;
5519 space_info->bytes_pinned -= len;
5521 space_info->bytes_readonly += len;
5524 spin_unlock(&cache->lock);
5525 if (!readonly && global_rsv->space_info == space_info) {
5526 spin_lock(&global_rsv->lock);
5527 if (!global_rsv->full) {
5528 len = min(len, global_rsv->size -
5529 global_rsv->reserved);
5530 global_rsv->reserved += len;
5531 space_info->bytes_may_use += len;
5532 if (global_rsv->reserved >= global_rsv->size)
5533 global_rsv->full = 1;
5535 spin_unlock(&global_rsv->lock);
5537 spin_unlock(&space_info->lock);
5541 btrfs_put_block_group(cache);
5545 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5546 struct btrfs_root *root)
5548 struct btrfs_fs_info *fs_info = root->fs_info;
5549 struct extent_io_tree *unpin;
5557 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5558 unpin = &fs_info->freed_extents[1];
5560 unpin = &fs_info->freed_extents[0];
5563 ret = find_first_extent_bit(unpin, 0, &start, &end,
5564 EXTENT_DIRTY, NULL);
5568 if (btrfs_test_opt(root, DISCARD))
5569 ret = btrfs_discard_extent(root, start,
5570 end + 1 - start, NULL);
5572 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5573 unpin_extent_range(root, start, end);
5580 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5581 u64 owner, u64 root_objectid)
5583 struct btrfs_space_info *space_info;
5586 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5587 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5588 flags = BTRFS_BLOCK_GROUP_SYSTEM;
5590 flags = BTRFS_BLOCK_GROUP_METADATA;
5592 flags = BTRFS_BLOCK_GROUP_DATA;
5595 space_info = __find_space_info(fs_info, flags);
5596 BUG_ON(!space_info); /* Logic bug */
5597 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5601 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5602 struct btrfs_root *root,
5603 u64 bytenr, u64 num_bytes, u64 parent,
5604 u64 root_objectid, u64 owner_objectid,
5605 u64 owner_offset, int refs_to_drop,
5606 struct btrfs_delayed_extent_op *extent_op)
5608 struct btrfs_key key;
5609 struct btrfs_path *path;
5610 struct btrfs_fs_info *info = root->fs_info;
5611 struct btrfs_root *extent_root = info->extent_root;
5612 struct extent_buffer *leaf;
5613 struct btrfs_extent_item *ei;
5614 struct btrfs_extent_inline_ref *iref;
5617 int extent_slot = 0;
5618 int found_extent = 0;
5622 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5625 path = btrfs_alloc_path();
5630 path->leave_spinning = 1;
5632 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5633 BUG_ON(!is_data && refs_to_drop != 1);
5636 skinny_metadata = 0;
5638 ret = lookup_extent_backref(trans, extent_root, path, &iref,
5639 bytenr, num_bytes, parent,
5640 root_objectid, owner_objectid,
5643 extent_slot = path->slots[0];
5644 while (extent_slot >= 0) {
5645 btrfs_item_key_to_cpu(path->nodes[0], &key,
5647 if (key.objectid != bytenr)
5649 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5650 key.offset == num_bytes) {
5654 if (key.type == BTRFS_METADATA_ITEM_KEY &&
5655 key.offset == owner_objectid) {
5659 if (path->slots[0] - extent_slot > 5)
5663 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5664 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5665 if (found_extent && item_size < sizeof(*ei))
5668 if (!found_extent) {
5670 ret = remove_extent_backref(trans, extent_root, path,
5674 btrfs_abort_transaction(trans, extent_root, ret);
5677 btrfs_release_path(path);
5678 path->leave_spinning = 1;
5680 key.objectid = bytenr;
5681 key.type = BTRFS_EXTENT_ITEM_KEY;
5682 key.offset = num_bytes;
5684 if (!is_data && skinny_metadata) {
5685 key.type = BTRFS_METADATA_ITEM_KEY;
5686 key.offset = owner_objectid;
5689 ret = btrfs_search_slot(trans, extent_root,
5691 if (ret > 0 && skinny_metadata && path->slots[0]) {
5693 * Couldn't find our skinny metadata item,
5694 * see if we have ye olde extent item.
5697 btrfs_item_key_to_cpu(path->nodes[0], &key,
5699 if (key.objectid == bytenr &&
5700 key.type == BTRFS_EXTENT_ITEM_KEY &&
5701 key.offset == num_bytes)
5705 if (ret > 0 && skinny_metadata) {
5706 skinny_metadata = false;
5707 key.type = BTRFS_EXTENT_ITEM_KEY;
5708 key.offset = num_bytes;
5709 btrfs_release_path(path);
5710 ret = btrfs_search_slot(trans, extent_root,
5715 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5718 btrfs_print_leaf(extent_root,
5722 btrfs_abort_transaction(trans, extent_root, ret);
5725 extent_slot = path->slots[0];
5727 } else if (ret == -ENOENT) {
5728 btrfs_print_leaf(extent_root, path->nodes[0]);
5731 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5732 bytenr, parent, root_objectid, owner_objectid,
5735 btrfs_abort_transaction(trans, extent_root, ret);
5739 leaf = path->nodes[0];
5740 item_size = btrfs_item_size_nr(leaf, extent_slot);
5741 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5742 if (item_size < sizeof(*ei)) {
5743 BUG_ON(found_extent || extent_slot != path->slots[0]);
5744 ret = convert_extent_item_v0(trans, extent_root, path,
5747 btrfs_abort_transaction(trans, extent_root, ret);
5751 btrfs_release_path(path);
5752 path->leave_spinning = 1;
5754 key.objectid = bytenr;
5755 key.type = BTRFS_EXTENT_ITEM_KEY;
5756 key.offset = num_bytes;
5758 ret = btrfs_search_slot(trans, extent_root, &key, path,
5761 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5763 btrfs_print_leaf(extent_root, path->nodes[0]);
5766 btrfs_abort_transaction(trans, extent_root, ret);
5770 extent_slot = path->slots[0];
5771 leaf = path->nodes[0];
5772 item_size = btrfs_item_size_nr(leaf, extent_slot);
5775 BUG_ON(item_size < sizeof(*ei));
5776 ei = btrfs_item_ptr(leaf, extent_slot,
5777 struct btrfs_extent_item);
5778 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5779 key.type == BTRFS_EXTENT_ITEM_KEY) {
5780 struct btrfs_tree_block_info *bi;
5781 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5782 bi = (struct btrfs_tree_block_info *)(ei + 1);
5783 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5786 refs = btrfs_extent_refs(leaf, ei);
5787 if (refs < refs_to_drop) {
5788 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5789 "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5791 btrfs_abort_transaction(trans, extent_root, ret);
5794 refs -= refs_to_drop;
5798 __run_delayed_extent_op(extent_op, leaf, ei);
5800 * In the case of inline back ref, reference count will
5801 * be updated by remove_extent_backref
5804 BUG_ON(!found_extent);
5806 btrfs_set_extent_refs(leaf, ei, refs);
5807 btrfs_mark_buffer_dirty(leaf);
5810 ret = remove_extent_backref(trans, extent_root, path,
5814 btrfs_abort_transaction(trans, extent_root, ret);
5818 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5822 BUG_ON(is_data && refs_to_drop !=
5823 extent_data_ref_count(root, path, iref));
5825 BUG_ON(path->slots[0] != extent_slot);
5827 BUG_ON(path->slots[0] != extent_slot + 1);
5828 path->slots[0] = extent_slot;
5833 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5836 btrfs_abort_transaction(trans, extent_root, ret);
5839 btrfs_release_path(path);
5842 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5844 btrfs_abort_transaction(trans, extent_root, ret);
5849 ret = update_block_group(root, bytenr, num_bytes, 0);
5851 btrfs_abort_transaction(trans, extent_root, ret);
5856 btrfs_free_path(path);
5861 * when we free a block, it is possible (and likely) that we free the last
5862 * delayed ref for that extent as well. This searches the delayed ref tree for
5863 * a given extent, and if there are no other delayed refs to be processed, it
5864 * removes it from the tree.
5866 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5867 struct btrfs_root *root, u64 bytenr)
5869 struct btrfs_delayed_ref_head *head;
5870 struct btrfs_delayed_ref_root *delayed_refs;
5871 struct btrfs_delayed_ref_node *ref;
5872 struct rb_node *node;
5875 delayed_refs = &trans->transaction->delayed_refs;
5876 spin_lock(&delayed_refs->lock);
5877 head = btrfs_find_delayed_ref_head(trans, bytenr);
5881 node = rb_prev(&head->node.rb_node);
5885 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5887 /* there are still entries for this ref, we can't drop it */
5888 if (ref->bytenr == bytenr)
5891 if (head->extent_op) {
5892 if (!head->must_insert_reserved)
5894 btrfs_free_delayed_extent_op(head->extent_op);
5895 head->extent_op = NULL;
5899 * waiting for the lock here would deadlock. If someone else has it
5900 * locked they are already in the process of dropping it anyway
5902 if (!mutex_trylock(&head->mutex))
5906 * at this point we have a head with no other entries. Go
5907 * ahead and process it.
5909 head->node.in_tree = 0;
5910 rb_erase(&head->node.rb_node, &delayed_refs->root);
5912 delayed_refs->num_entries--;
5915 * we don't take a ref on the node because we're removing it from the
5916 * tree, so we just steal the ref the tree was holding.
5918 delayed_refs->num_heads--;
5919 if (list_empty(&head->cluster))
5920 delayed_refs->num_heads_ready--;
5922 list_del_init(&head->cluster);
5923 spin_unlock(&delayed_refs->lock);
5925 BUG_ON(head->extent_op);
5926 if (head->must_insert_reserved)
5929 mutex_unlock(&head->mutex);
5930 btrfs_put_delayed_ref(&head->node);
5933 spin_unlock(&delayed_refs->lock);
5937 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5938 struct btrfs_root *root,
5939 struct extent_buffer *buf,
5940 u64 parent, int last_ref)
5942 struct btrfs_block_group_cache *cache = NULL;
5946 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5947 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5948 buf->start, buf->len,
5949 parent, root->root_key.objectid,
5950 btrfs_header_level(buf),
5951 BTRFS_DROP_DELAYED_REF, NULL, 0);
5952 BUG_ON(ret); /* -ENOMEM */
5958 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5960 if (btrfs_header_generation(buf) == trans->transid) {
5961 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5962 ret = check_ref_cleanup(trans, root, buf->start);
5967 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5968 pin_down_extent(root, cache, buf->start, buf->len, 1);
5972 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5974 btrfs_add_free_space(cache, buf->start, buf->len);
5975 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5976 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
5981 add_pinned_bytes(root->fs_info, buf->len,
5982 btrfs_header_level(buf),
5983 root->root_key.objectid);
5986 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5989 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5990 btrfs_put_block_group(cache);
5993 /* Can return -ENOMEM */
5994 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5995 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5996 u64 owner, u64 offset, int for_cow)
5999 struct btrfs_fs_info *fs_info = root->fs_info;
6001 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6004 * tree log blocks never actually go into the extent allocation
6005 * tree, just update pinning info and exit early.
6007 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6008 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6009 /* unlocks the pinned mutex */
6010 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6012 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6013 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6015 parent, root_objectid, (int)owner,
6016 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6018 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6020 parent, root_objectid, owner,
6021 offset, BTRFS_DROP_DELAYED_REF,
6027 static u64 stripe_align(struct btrfs_root *root,
6028 struct btrfs_block_group_cache *cache,
6029 u64 val, u64 num_bytes)
6031 u64 ret = ALIGN(val, root->stripesize);
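/*
 * Editor's note -- ALIGN() rounds up to the next stripesize boundary;
 * e.g. (made-up values) with a 64K stripesize, val = 100K aligns to
 * 128K, and an already-aligned 128K is returned unchanged.
 */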
6036 * when we wait for progress in the block group caching, it's because
6037 * our allocation attempt failed at least once. So, we must sleep
6038 * and let some progress happen before we try again.
6040 * This function will sleep at least once waiting for new free space to
6041 * show up, and then it will check the block group free space numbers
6042 * for our min num_bytes. Another option is to have it go ahead
6043 * and look in the rbtree for a free extent of a given size, but this
6044 * is a good start.
6046 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6047 * any of the information in this block group.
6049 static noinline void
6050 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6053 struct btrfs_caching_control *caching_ctl;
6055 caching_ctl = get_caching_control(cache);
6059 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6060 (cache->free_space_ctl->free_space >= num_bytes));
6062 put_caching_control(caching_ctl);
6066 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6068 struct btrfs_caching_control *caching_ctl;
6071 caching_ctl = get_caching_control(cache);
6073 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6075 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6076 if (cache->cached == BTRFS_CACHE_ERROR)
6078 put_caching_control(caching_ctl);
6082 int __get_raid_index(u64 flags)
6084 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6085 return BTRFS_RAID_RAID10;
6086 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6087 return BTRFS_RAID_RAID1;
6088 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6089 return BTRFS_RAID_DUP;
6090 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6091 return BTRFS_RAID_RAID0;
6092 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6093 return BTRFS_RAID_RAID5;
6094 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6095 return BTRFS_RAID_RAID6;
6097 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6100 static int get_block_group_index(struct btrfs_block_group_cache *cache)
6102 return __get_raid_index(cache->flags);
6105 enum btrfs_loop_type {
6106 LOOP_CACHING_NOWAIT = 0,
6107 LOOP_CACHING_WAIT = 1,
6108 LOOP_ALLOC_CHUNK = 2,
6109 LOOP_NO_EMPTY_SIZE = 3,
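/*
 * Editor's note -- the loop values above escalate in order as the
 * allocator fails to find space: first search only cached block groups
 * without waiting, then wait on groups that are still caching, then
 * force a chunk allocation, and finally retry with empty_size and
 * empty_cluster forced to 0 (see the loop bump near the bottom of
 * find_free_extent() below).
 */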
6113 * walks the btree of allocated extents and finds a hole of a given size.
6114 * The key ins is changed to record the hole:
6115 * ins->objectid == start position
6116 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6117 * ins->offset == the size of the hole.
6118 * Any available blocks before search_start are skipped.
6120 * If there is no suitable free space, we record the max size of
6121 * the largest free space extent currently available.
6123 static noinline int find_free_extent(struct btrfs_root *orig_root,
6124 u64 num_bytes, u64 empty_size,
6125 u64 hint_byte, struct btrfs_key *ins,
6129 struct btrfs_root *root = orig_root->fs_info->extent_root;
6130 struct btrfs_free_cluster *last_ptr = NULL;
6131 struct btrfs_block_group_cache *block_group = NULL;
6132 struct btrfs_block_group_cache *used_block_group;
6133 u64 search_start = 0;
6134 u64 max_extent_size = 0;
6135 int empty_cluster = 2 * 1024 * 1024;
6136 struct btrfs_space_info *space_info;
6138 int index = __get_raid_index(flags);
6139 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6140 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6141 bool found_uncached_bg = false;
6142 bool failed_cluster_refill = false;
6143 bool failed_alloc = false;
6144 bool use_cluster = true;
6145 bool have_caching_bg = false;
6147 WARN_ON(num_bytes < root->sectorsize);
6148 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6152 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6154 space_info = __find_space_info(root->fs_info, flags);
6156 btrfs_err(root->fs_info, "No space info for %llu", flags);
6161 * If the space info is for both data and metadata it means we have a
6162 * small filesystem and we can't use the clustering stuff.
6164 if (btrfs_mixed_space_info(space_info))
6165 use_cluster = false;
6167 if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6168 last_ptr = &root->fs_info->meta_alloc_cluster;
6169 if (!btrfs_test_opt(root, SSD))
6170 empty_cluster = 64 * 1024;
6173 if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6174 btrfs_test_opt(root, SSD)) {
6175 last_ptr = &root->fs_info->data_alloc_cluster;
6179 spin_lock(&last_ptr->lock);
6180 if (last_ptr->block_group)
6181 hint_byte = last_ptr->window_start;
6182 spin_unlock(&last_ptr->lock);
6185 search_start = max(search_start, first_logical_byte(root, 0));
6186 search_start = max(search_start, hint_byte);
6191 if (search_start == hint_byte) {
6192 block_group = btrfs_lookup_block_group(root->fs_info,
6194 used_block_group = block_group;
6196 * we don't want to use the block group if it doesn't match our
6197 * allocation bits, or if it's not cached.
6199 * However if we are re-searching with an ideal block group
6200 * picked out then we don't care that the block group is cached.
6202 if (block_group && block_group_bits(block_group, flags) &&
6203 block_group->cached != BTRFS_CACHE_NO) {
6204 down_read(&space_info->groups_sem);
6205 if (list_empty(&block_group->list) ||
6208 * someone is removing this block group,
6209 * we can't jump into the have_block_group
6210 * target because our list pointers are not
6211 * valid.
6213 btrfs_put_block_group(block_group);
6214 up_read(&space_info->groups_sem);
6216 index = get_block_group_index(block_group);
6217 goto have_block_group;
6219 } else if (block_group) {
6220 btrfs_put_block_group(block_group);
6224 have_caching_bg = false;
6225 down_read(&space_info->groups_sem);
6226 list_for_each_entry(block_group, &space_info->block_groups[index],
6231 used_block_group = block_group;
6232 btrfs_get_block_group(block_group);
6233 search_start = block_group->key.objectid;
6236 * this can happen if we end up cycling through all the
6237 * raid types, but we want to make sure we only allocate
6238 * for the proper type.
6240 if (!block_group_bits(block_group, flags)) {
6241 u64 extra = BTRFS_BLOCK_GROUP_DUP |
6242 BTRFS_BLOCK_GROUP_RAID1 |
6243 BTRFS_BLOCK_GROUP_RAID5 |
6244 BTRFS_BLOCK_GROUP_RAID6 |
6245 BTRFS_BLOCK_GROUP_RAID10;
6248 * if they asked for extra copies and this block group
6249 * doesn't provide them, bail. This does allow us to
6250 * fill raid0 from raid1.
6252 if ((flags & extra) && !(block_group->flags & extra))
6257 cached = block_group_cache_done(block_group);
6258 if (unlikely(!cached)) {
6259 found_uncached_bg = true;
6260 ret = cache_block_group(block_group, 0);
6265 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6267 if (unlikely(block_group->ro))
6271 * Ok we want to try and use the cluster allocator, so
6272 * let's look there.
6275 unsigned long aligned_cluster;
6277 * the refill lock keeps out other
6278 * people trying to start a new cluster
6280 spin_lock(&last_ptr->refill_lock);
6281 used_block_group = last_ptr->block_group;
6282 if (used_block_group != block_group &&
6283 (!used_block_group ||
6284 used_block_group->ro ||
6285 !block_group_bits(used_block_group, flags))) {
6286 used_block_group = block_group;
6287 goto refill_cluster;
6290 if (used_block_group != block_group)
6291 btrfs_get_block_group(used_block_group);
6293 offset = btrfs_alloc_from_cluster(used_block_group,
6296 used_block_group->key.objectid,
6299 /* we have a block, we're done */
6300 spin_unlock(&last_ptr->refill_lock);
6301 trace_btrfs_reserve_extent_cluster(root,
6302 block_group, search_start, num_bytes);
6306 WARN_ON(last_ptr->block_group != used_block_group);
6307 if (used_block_group != block_group) {
6308 btrfs_put_block_group(used_block_group);
6309 used_block_group = block_group;
6312 BUG_ON(used_block_group != block_group);
6313 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6314 * set up a new cluster, so let's just skip it
6315 * and let the allocator find whatever block
6316 * it can find. If we reach this point, we
6317 * will have tried the cluster allocator
6318 * plenty of times and not have found
6319 * anything, so we are likely way too
6320 * fragmented for the clustering stuff to find
6323 * However, if the cluster is taken from the
6324 * current block group, release the cluster
6325 * first, so that we stand a better chance of
6326 * succeeding in the unclustered
6327 * allocation.
6328 if (loop >= LOOP_NO_EMPTY_SIZE &&
6329 last_ptr->block_group != block_group) {
6330 spin_unlock(&last_ptr->refill_lock);
6331 goto unclustered_alloc;
6335 * this cluster didn't work out, free it and
6336 * start over
6338 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6340 if (loop >= LOOP_NO_EMPTY_SIZE) {
6341 spin_unlock(&last_ptr->refill_lock);
6342 goto unclustered_alloc;
6345 aligned_cluster = max_t(unsigned long,
6346 empty_cluster + empty_size,
6347 block_group->full_stripe_len);
6349 /* allocate a cluster in this block group */
6350 ret = btrfs_find_space_cluster(root, block_group,
6351 last_ptr, search_start,
6356 * now pull our allocation out of this
6357 * cluster
6359 offset = btrfs_alloc_from_cluster(block_group,
6365 /* we found one, proceed */
6366 spin_unlock(&last_ptr->refill_lock);
6367 trace_btrfs_reserve_extent_cluster(root,
6368 block_group, search_start,
6372 } else if (!cached && loop > LOOP_CACHING_NOWAIT
6373 && !failed_cluster_refill) {
6374 spin_unlock(&last_ptr->refill_lock);
6376 failed_cluster_refill = true;
6377 wait_block_group_cache_progress(block_group,
6378 num_bytes + empty_cluster + empty_size);
6379 goto have_block_group;
6383 * at this point we either didn't find a cluster
6384 * or we weren't able to allocate a block from our
6385 * cluster. Free the cluster we've been trying
6386 * to use, and go to the next block group
6388 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6389 spin_unlock(&last_ptr->refill_lock);
6394 spin_lock(&block_group->free_space_ctl->tree_lock);
6396 block_group->free_space_ctl->free_space <
6397 num_bytes + empty_cluster + empty_size) {
6398 if (block_group->free_space_ctl->free_space >
6401 block_group->free_space_ctl->free_space;
6402 spin_unlock(&block_group->free_space_ctl->tree_lock);
6405 spin_unlock(&block_group->free_space_ctl->tree_lock);
6407 offset = btrfs_find_space_for_alloc(block_group, search_start,
6408 num_bytes, empty_size,
6411 * If we didn't find a chunk, and we haven't failed on this
6412 * block group before, and this block group is in the middle of
6413 * caching and we are ok with waiting, then go ahead and wait
6414 * for progress to be made, and set failed_alloc to true.
6416 * If failed_alloc is true then we've already waited on this
6417 * block group once and should move on to the next block group.
6419 if (!offset && !failed_alloc && !cached &&
6420 loop > LOOP_CACHING_NOWAIT) {
6421 wait_block_group_cache_progress(block_group,
6422 num_bytes + empty_size);
6423 failed_alloc = true;
6424 goto have_block_group;
6425 } else if (!offset) {
6427 have_caching_bg = true;
6431 search_start = stripe_align(root, used_block_group,
6434 /* move on to the next group */
6435 if (search_start + num_bytes >
6436 used_block_group->key.objectid + used_block_group->key.offset) {
6437 btrfs_add_free_space(used_block_group, offset, num_bytes);
6441 if (offset < search_start)
6442 btrfs_add_free_space(used_block_group, offset,
6443 search_start - offset);
6444 BUG_ON(offset > search_start);
6446 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6448 if (ret == -EAGAIN) {
6449 btrfs_add_free_space(used_block_group, offset, num_bytes);
6453 /* we are all good, let's return */
6454 ins->objectid = search_start;
6455 ins->offset = num_bytes;
6457 trace_btrfs_reserve_extent(orig_root, block_group,
6458 search_start, num_bytes);
6459 if (used_block_group != block_group)
6460 btrfs_put_block_group(used_block_group);
6461 btrfs_put_block_group(block_group);
6464 failed_cluster_refill = false;
6465 failed_alloc = false;
6466 BUG_ON(index != get_block_group_index(block_group));
6467 if (used_block_group != block_group)
6468 btrfs_put_block_group(used_block_group);
6469 btrfs_put_block_group(block_group);
6471 up_read(&space_info->groups_sem);
6473 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6476 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6480 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6481 * caching kthreads as we move along
6482 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6483 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6484 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6485 * again
6487 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6490 if (loop == LOOP_ALLOC_CHUNK) {
6491 struct btrfs_trans_handle *trans;
6493 trans = btrfs_join_transaction(root);
6494 if (IS_ERR(trans)) {
6495 ret = PTR_ERR(trans);
6499 ret = do_chunk_alloc(trans, root, flags,
6502 * Do not bail out on ENOSPC since we
6503 * can do more things.
6505 if (ret < 0 && ret != -ENOSPC)
6506 btrfs_abort_transaction(trans,
6510 btrfs_end_transaction(trans, root);
6515 if (loop == LOOP_NO_EMPTY_SIZE) {
6521 } else if (!ins->objectid) {
6523 } else if (ins->objectid) {
6528 ins->offset = max_extent_size;
6532 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6533 int dump_block_groups)
6535 struct btrfs_block_group_cache *cache;
6538 spin_lock(&info->lock);
6539 printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6541 info->total_bytes - info->bytes_used - info->bytes_pinned -
6542 info->bytes_reserved - info->bytes_readonly,
6543 (info->full) ? "" : "not ");
6544 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6545 "reserved=%llu, may_use=%llu, readonly=%llu\n",
6546 info->total_bytes, info->bytes_used, info->bytes_pinned,
6547 info->bytes_reserved, info->bytes_may_use,
6548 info->bytes_readonly);
6549 spin_unlock(&info->lock);
6551 if (!dump_block_groups)
6554 down_read(&info->groups_sem);
6556 list_for_each_entry(cache, &info->block_groups[index], list) {
6557 spin_lock(&cache->lock);
6558 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6559 cache->key.objectid, cache->key.offset,
6560 btrfs_block_group_used(&cache->item), cache->pinned,
6561 cache->reserved, cache->ro ? "[readonly]" : "");
6562 btrfs_dump_free_space(cache, bytes);
6563 spin_unlock(&cache->lock);
6565 if (++index < BTRFS_NR_RAID_TYPES)
6567 up_read(&info->groups_sem);
6570 int btrfs_reserve_extent(struct btrfs_root *root,
6571 u64 num_bytes, u64 min_alloc_size,
6572 u64 empty_size, u64 hint_byte,
6573 struct btrfs_key *ins, int is_data)
6575 bool final_tried = false;
6579 flags = btrfs_get_alloc_profile(root, is_data);
6581 WARN_ON(num_bytes < root->sectorsize);
6582 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6585 if (ret == -ENOSPC) {
6586 if (!final_tried && ins->offset) {
6587 num_bytes = min(num_bytes >> 1, ins->offset);
6588 num_bytes = round_down(num_bytes, root->sectorsize);
6589 num_bytes = max(num_bytes, min_alloc_size);
6590 if (num_bytes == min_alloc_size)
6593 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6594 struct btrfs_space_info *sinfo;
6596 sinfo = __find_space_info(root->fs_info, flags);
6597 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6600 dump_space_info(sinfo, num_bytes, 1);
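/*
 * Editor's note -- a hypothetical retry sequence for the halving loop
 * above: ask for 8MB with min_alloc_size = 1MB while the largest free
 * extent is 3MB. The first pass fails with -ENOSPC and ins->offset =
 * 3MB, so the retry asks for min(4MB, 3MB) = 3MB rounded down to the
 * sectorsize; requests keep shrinking until one succeeds or the
 * min_alloc_size attempt is marked final_tried.
 */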
6607 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6608 u64 start, u64 len, int pin)
6610 struct btrfs_block_group_cache *cache;
6613 cache = btrfs_lookup_block_group(root->fs_info, start);
6615 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6620 if (btrfs_test_opt(root, DISCARD))
6621 ret = btrfs_discard_extent(root, start, len, NULL);
6624 pin_down_extent(root, cache, start, len, 1);
6626 btrfs_add_free_space(cache, start, len);
6627 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6629 btrfs_put_block_group(cache);
6631 trace_btrfs_reserved_extent_free(root, start, len);
6636 int btrfs_free_reserved_extent(struct btrfs_root *root,
6639 return __btrfs_free_reserved_extent(root, start, len, 0);
6642 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6645 return __btrfs_free_reserved_extent(root, start, len, 1);
6648 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6649 struct btrfs_root *root,
6650 u64 parent, u64 root_objectid,
6651 u64 flags, u64 owner, u64 offset,
6652 struct btrfs_key *ins, int ref_mod)
6655 struct btrfs_fs_info *fs_info = root->fs_info;
6656 struct btrfs_extent_item *extent_item;
6657 struct btrfs_extent_inline_ref *iref;
6658 struct btrfs_path *path;
6659 struct extent_buffer *leaf;
6664 type = BTRFS_SHARED_DATA_REF_KEY;
6666 type = BTRFS_EXTENT_DATA_REF_KEY;
6668 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6670 path = btrfs_alloc_path();
6674 path->leave_spinning = 1;
6675 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6678 btrfs_free_path(path);
6682 leaf = path->nodes[0];
6683 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6684 struct btrfs_extent_item);
6685 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6686 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6687 btrfs_set_extent_flags(leaf, extent_item,
6688 flags | BTRFS_EXTENT_FLAG_DATA);
6690 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6691 btrfs_set_extent_inline_ref_type(leaf, iref, type);
6693 struct btrfs_shared_data_ref *ref;
6694 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6695 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6696 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6698 struct btrfs_extent_data_ref *ref;
6699 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6700 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6701 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6702 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6703 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
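/*
 * Editor's note -- a rough sketch (editor's rendering, not
 * authoritative) of the item laid out above:
 *
 *   [btrfs_extent_item: refs | generation | flags(DATA)]
 *   [inline ref type]
 *     SHARED_DATA_REF_KEY: [ref offset = parent][shared ref count]
 *     EXTENT_DATA_REF_KEY: [root | objectid | offset | count]
 */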
6706 btrfs_mark_buffer_dirty(path->nodes[0]);
6707 btrfs_free_path(path);
6709 ret = update_block_group(root, ins->objectid, ins->offset, 1);
6710 if (ret) { /* -ENOENT, logic error */
6711 btrfs_err(fs_info, "update block group failed for %llu %llu",
6712 ins->objectid, ins->offset);
6715 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6719 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6720 struct btrfs_root *root,
6721 u64 parent, u64 root_objectid,
6722 u64 flags, struct btrfs_disk_key *key,
6723 int level, struct btrfs_key *ins)
6726 struct btrfs_fs_info *fs_info = root->fs_info;
6727 struct btrfs_extent_item *extent_item;
6728 struct btrfs_tree_block_info *block_info;
6729 struct btrfs_extent_inline_ref *iref;
6730 struct btrfs_path *path;
6731 struct extent_buffer *leaf;
6732 u32 size = sizeof(*extent_item) + sizeof(*iref);
6733 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6736 if (!skinny_metadata)
6737 size += sizeof(*block_info);
6739 path = btrfs_alloc_path();
6741 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6746 path->leave_spinning = 1;
6747 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6750 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6752 btrfs_free_path(path);
6756 leaf = path->nodes[0];
6757 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6758 struct btrfs_extent_item);
6759 btrfs_set_extent_refs(leaf, extent_item, 1);
6760 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6761 btrfs_set_extent_flags(leaf, extent_item,
6762 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6764 if (skinny_metadata) {
6765 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6767 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6768 btrfs_set_tree_block_key(leaf, block_info, key);
6769 btrfs_set_tree_block_level(leaf, block_info, level);
6770 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6774 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6775 btrfs_set_extent_inline_ref_type(leaf, iref,
6776 BTRFS_SHARED_BLOCK_REF_KEY);
6777 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6779 btrfs_set_extent_inline_ref_type(leaf, iref,
6780 BTRFS_TREE_BLOCK_REF_KEY);
6781 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6784 btrfs_mark_buffer_dirty(leaf);
6785 btrfs_free_path(path);
6787 ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6788 if (ret) { /* -ENOENT, logic error */
6789 btrfs_err(fs_info, "update block group failed for %llu %llu",
6790 ins->objectid, ins->offset);
6794 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6798 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6799 struct btrfs_root *root,
6800 u64 root_objectid, u64 owner,
6801 u64 offset, struct btrfs_key *ins)
6805 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6807 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6809 root_objectid, owner, offset,
6810 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6815 * this is used by the tree logging recovery code. It records that
6816 * an extent has been allocated and makes sure to clear the free
6817 * space cache bits as well
6819 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6820 struct btrfs_root *root,
6821 u64 root_objectid, u64 owner, u64 offset,
6822 struct btrfs_key *ins)
6825 struct btrfs_block_group_cache *block_group;
6828 * Mixed block groups will exclude before processing the log so we only
6829 * need to do the exclude dance if this fs isn't mixed.
6831 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6832 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6837 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6841 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6842 RESERVE_ALLOC_NO_ACCOUNT);
6843 BUG_ON(ret); /* logic error */
6844 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6845 0, owner, offset, ins, 1);
6846 btrfs_put_block_group(block_group);
6850 static struct extent_buffer *
6851 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6852 u64 bytenr, u32 blocksize, int level)
6854 struct extent_buffer *buf;
6856 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6858 return ERR_PTR(-ENOMEM);
6859 btrfs_set_header_generation(buf, trans->transid);
6860 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6861 btrfs_tree_lock(buf);
6862 clean_tree_block(trans, root, buf);
6863 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6865 btrfs_set_lock_blocking(buf);
6866 btrfs_set_buffer_uptodate(buf);
6868 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6870 * we allow two log transactions at a time, use different
6871		 * EXTENT bits to differentiate dirty pages.
6873 if (root->log_transid % 2 == 0)
6874 set_extent_dirty(&root->dirty_log_pages, buf->start,
6875 buf->start + buf->len - 1, GFP_NOFS);
6877 set_extent_new(&root->dirty_log_pages, buf->start,
6878 buf->start + buf->len - 1, GFP_NOFS);
6880 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6881 buf->start + buf->len - 1, GFP_NOFS);
6883 trans->blocks_used++;
6884 /* this returns a buffer locked for blocking */
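/*
 * For log tree buffers the function above picks between two extent bits
 * purely on the parity of log_transid, so two in-flight log transactions
 * never see each other's dirty pages. A minimal sketch of that selection;
 * illustrative only:
 */
static inline bool log_buffer_uses_dirty_bit_sketch(u64 log_transid)
{
	/* even log transactions use EXTENT_DIRTY, odd ones EXTENT_NEW */
	return (log_transid % 2) == 0;
}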
6888 static struct btrfs_block_rsv *
6889 use_block_rsv(struct btrfs_trans_handle *trans,
6890 struct btrfs_root *root, u32 blocksize)
6892 struct btrfs_block_rsv *block_rsv;
6893 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6895 bool global_updated = false;
6897 block_rsv = get_block_rsv(trans, root);
6899 if (unlikely(block_rsv->size == 0))
6902 ret = block_rsv_use_bytes(block_rsv, blocksize);
6906 if (block_rsv->failfast)
6907 return ERR_PTR(ret);
6909 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6910 global_updated = true;
6911 update_global_block_rsv(root->fs_info);
6915 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6916 static DEFINE_RATELIMIT_STATE(_rs,
6917 DEFAULT_RATELIMIT_INTERVAL * 10,
6918 /*DEFAULT_RATELIMIT_BURST*/ 1);
6919 if (__ratelimit(&_rs))
6921 "btrfs: block rsv returned %d\n", ret);
6924 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6925 BTRFS_RESERVE_NO_FLUSH);
6929 * If we couldn't reserve metadata bytes try and use some from
6930 * the global reserve if its space type is the same as the global
6933 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6934 block_rsv->space_info == global_rsv->space_info) {
6935 ret = block_rsv_use_bytes(global_rsv, blocksize);
6939 return ERR_PTR(ret);
6942 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6943 struct btrfs_block_rsv *block_rsv, u32 blocksize)
6945 block_rsv_add_bytes(block_rsv, blocksize, 0);
6946 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
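/*
 * use_block_rsv() above tries up to three sources for the metadata
 * reservation. A minimal sketch of the decision ladder, with the actual
 * reservation calls reduced to hypothetical booleans; illustrative only:
 */
static inline int rsv_fallback_order_sketch(bool root_rsv_ok, bool reserve_ok,
					    bool global_rsv_ok)
{
	if (root_rsv_ok)	/* block_rsv_use_bytes() on the root's rsv */
		return 1;
	if (reserve_ok)		/* reserve_metadata_bytes(), NO_FLUSH */
		return 2;
	if (global_rsv_ok)	/* borrow from the global reserve */
		return 3;
	return -ENOSPC;
}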
6950 * finds a free extent and does all the dirty work required for allocation.
6951 * returns the key for the extent through ins, and a tree buffer for
6952 * the first block of the extent through buf.
6954 * returns the tree buffer or an ERR_PTR on failure.
6956 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6957 struct btrfs_root *root, u32 blocksize,
6958 u64 parent, u64 root_objectid,
6959 struct btrfs_disk_key *key, int level,
6960 u64 hint, u64 empty_size)
6962 struct btrfs_key ins;
6963 struct btrfs_block_rsv *block_rsv;
6964 struct extent_buffer *buf;
6967	bool skinny_metadata = btrfs_fs_incompat(root->fs_info, SKINNY_METADATA);
6970 block_rsv = use_block_rsv(trans, root, blocksize);
6971 if (IS_ERR(block_rsv))
6972 return ERR_CAST(block_rsv);
6974 ret = btrfs_reserve_extent(root, blocksize, blocksize,
6975 empty_size, hint, &ins, 0);
6977 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6978 return ERR_PTR(ret);
6981 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6983 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6985 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6987 parent = ins.objectid;
6988 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6992 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6993 struct btrfs_delayed_extent_op *extent_op;
6994 extent_op = btrfs_alloc_delayed_extent_op();
6995 BUG_ON(!extent_op); /* -ENOMEM */
6997 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6999 memset(&extent_op->key, 0, sizeof(extent_op->key));
7000 extent_op->flags_to_set = flags;
7001 if (skinny_metadata)
7002 extent_op->update_key = 0;
7004 extent_op->update_key = 1;
7005 extent_op->update_flags = 1;
7006 extent_op->is_data = 0;
7007 extent_op->level = level;
7009 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7011 ins.offset, parent, root_objectid,
7012 level, BTRFS_ADD_DELAYED_EXTENT,
7014 BUG_ON(ret); /* -ENOMEM */
7019 struct walk_control {
7020 u64 refs[BTRFS_MAX_LEVEL];
7021 u64 flags[BTRFS_MAX_LEVEL];
7022 struct btrfs_key update_progress;
7033 #define DROP_REFERENCE 1
7034 #define UPDATE_BACKREF 2
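/*
 * The walkers below behave as a two-stage state machine per subtree. A
 * minimal sketch of the forward transition taken in do_walk_down(); the
 * real code also consults block generations and the progress key, and
 * walk_up_proc() flips the stage back. Illustrative only:
 */
static inline int next_walk_stage_sketch(int stage, u64 refs, bool need_update)
{
	if (stage == DROP_REFERENCE && refs > 1 && need_update)
		return UPDATE_BACKREF;	/* fix backrefs in the subtree first */
	return stage;			/* otherwise keep doing what we did */
}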
7036 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7037 struct btrfs_root *root,
7038 struct walk_control *wc,
7039 struct btrfs_path *path)
7047 struct btrfs_key key;
7048 struct extent_buffer *eb;
7053 if (path->slots[wc->level] < wc->reada_slot) {
7054 wc->reada_count = wc->reada_count * 2 / 3;
7055 wc->reada_count = max(wc->reada_count, 2);
7057 wc->reada_count = wc->reada_count * 3 / 2;
7058 wc->reada_count = min_t(int, wc->reada_count,
7059 BTRFS_NODEPTRS_PER_BLOCK(root));
7062 eb = path->nodes[wc->level];
7063 nritems = btrfs_header_nritems(eb);
7064 blocksize = btrfs_level_size(root, wc->level - 1);
7066 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7067 if (nread >= wc->reada_count)
7071 bytenr = btrfs_node_blockptr(eb, slot);
7072 generation = btrfs_node_ptr_generation(eb, slot);
7074 if (slot == path->slots[wc->level])
7077 if (wc->stage == UPDATE_BACKREF &&
7078 generation <= root->root_key.offset)
7081 /* We don't lock the tree block, it's OK to be racy here */
7082 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7083 wc->level - 1, 1, &refs,
7085 /* We don't care about errors in readahead. */
7090 if (wc->stage == DROP_REFERENCE) {
7094 if (wc->level == 1 &&
7095 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7097 if (!wc->update_ref ||
7098 generation <= root->root_key.offset)
7100 btrfs_node_key_to_cpu(eb, &key, slot);
7101 ret = btrfs_comp_cpu_keys(&key,
7102 &wc->update_progress);
7106 if (wc->level == 1 &&
7107 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7111 ret = readahead_tree_block(root, bytenr, blocksize,
7117 wc->reada_slot = slot;
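/*
 * The readahead window above adapts multiplicatively: shrink by 2/3 when we
 * re-enter earlier slots, grow by 3/2 otherwise, clamped to
 * [2, nodeptrs-per-block]. A minimal sketch of the update, with the cap
 * passed in as a plain parameter; illustrative only:
 */
static inline int reada_count_next_sketch(int count, bool reentered,
					  int nodeptrs_per_block)
{
	if (reentered) {
		count = count * 2 / 3;		/* shrink the window */
		return count > 2 ? count : 2;
	}
	count = count * 3 / 2;			/* grow the window */
	return count < nodeptrs_per_block ? count : nodeptrs_per_block;
}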
7121 * helper to process tree block while walking down the tree.
7123 * when wc->stage == UPDATE_BACKREF, this function updates
7124 * back refs for pointers in the block.
7126 * NOTE: return value 1 means we should stop walking down.
7128 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7129 struct btrfs_root *root,
7130 struct btrfs_path *path,
7131 struct walk_control *wc, int lookup_info)
7133 int level = wc->level;
7134 struct extent_buffer *eb = path->nodes[level];
7135 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7138 if (wc->stage == UPDATE_BACKREF &&
7139 btrfs_header_owner(eb) != root->root_key.objectid)
7143	 * when the reference count of a tree block is 1, it won't increase
7144	 * again. once the full backref flag is set, we never clear it.
7147 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7148 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7149 BUG_ON(!path->locks[level]);
7150 ret = btrfs_lookup_extent_info(trans, root,
7151 eb->start, level, 1,
7154 BUG_ON(ret == -ENOMEM);
7157 BUG_ON(wc->refs[level] == 0);
7160 if (wc->stage == DROP_REFERENCE) {
7161 if (wc->refs[level] > 1)
7164 if (path->locks[level] && !wc->keep_locks) {
7165 btrfs_tree_unlock_rw(eb, path->locks[level]);
7166 path->locks[level] = 0;
7171 /* wc->stage == UPDATE_BACKREF */
7172 if (!(wc->flags[level] & flag)) {
7173 BUG_ON(!path->locks[level]);
7174 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7175 BUG_ON(ret); /* -ENOMEM */
7176 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7177 BUG_ON(ret); /* -ENOMEM */
7178 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7180 btrfs_header_level(eb), 0);
7181 BUG_ON(ret); /* -ENOMEM */
7182 wc->flags[level] |= flag;
7186 * the block is shared by multiple trees, so it's not good to
7187 * keep the tree lock
7189 if (path->locks[level] && level > 0) {
7190 btrfs_tree_unlock_rw(eb, path->locks[level]);
7191 path->locks[level] = 0;
7197 * helper to process tree block pointer.
7199 * when wc->stage == DROP_REFERENCE, this function checks
7200 * reference count of the block pointed to. if the block
7201 * is shared and we need to update back refs for the subtree
7202 * rooted at the block, this function changes wc->stage to
7203 * UPDATE_BACKREF. if the block is shared and there is no
7204 * need to update back refs, this function drops its reference
7207 * NOTE: return value 1 means we should stop walking down.
7209 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7210 struct btrfs_root *root,
7211 struct btrfs_path *path,
7212 struct walk_control *wc, int *lookup_info)
7218 struct btrfs_key key;
7219 struct extent_buffer *next;
7220 int level = wc->level;
7224 generation = btrfs_node_ptr_generation(path->nodes[level],
7225 path->slots[level]);
7227 * if the lower level block was created before the snapshot
7228 * was created, we know there is no need to update back refs
7231 if (wc->stage == UPDATE_BACKREF &&
7232 generation <= root->root_key.offset) {
7237 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7238 blocksize = btrfs_level_size(root, level - 1);
7240 next = btrfs_find_tree_block(root, bytenr, blocksize);
7242 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7245 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7249 btrfs_tree_lock(next);
7250 btrfs_set_lock_blocking(next);
7252 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7253 &wc->refs[level - 1],
7254 &wc->flags[level - 1]);
7256 btrfs_tree_unlock(next);
7260 if (unlikely(wc->refs[level - 1] == 0)) {
7261 btrfs_err(root->fs_info, "Missing references.");
7266 if (wc->stage == DROP_REFERENCE) {
7267 if (wc->refs[level - 1] > 1) {
7269 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7272 if (!wc->update_ref ||
7273 generation <= root->root_key.offset)
7276 btrfs_node_key_to_cpu(path->nodes[level], &key,
7277 path->slots[level]);
7278 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7282 wc->stage = UPDATE_BACKREF;
7283 wc->shared_level = level - 1;
7287 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7291 if (!btrfs_buffer_uptodate(next, generation, 0)) {
7292 btrfs_tree_unlock(next);
7293 free_extent_buffer(next);
7299 if (reada && level == 1)
7300 reada_walk_down(trans, root, wc, path);
7301 next = read_tree_block(root, bytenr, blocksize, generation);
7302 if (!next || !extent_buffer_uptodate(next)) {
7303 free_extent_buffer(next);
7306 btrfs_tree_lock(next);
7307 btrfs_set_lock_blocking(next);
7311 BUG_ON(level != btrfs_header_level(next));
7312 path->nodes[level] = next;
7313 path->slots[level] = 0;
7314 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7320 wc->refs[level - 1] = 0;
7321 wc->flags[level - 1] = 0;
7322 if (wc->stage == DROP_REFERENCE) {
7323 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7324 parent = path->nodes[level]->start;
7326 BUG_ON(root->root_key.objectid !=
7327 btrfs_header_owner(path->nodes[level]));
7331 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7332 root->root_key.objectid, level - 1, 0, 0);
7333 BUG_ON(ret); /* -ENOMEM */
7335 btrfs_tree_unlock(next);
7336 free_extent_buffer(next);
7342 * helper to process tree block while walking up the tree.
7344 * when wc->stage == DROP_REFERENCE, this function drops
7345 * reference count on the block.
7347 * when wc->stage == UPDATE_BACKREF, this function changes
7348 * wc->stage back to DROP_REFERENCE if we changed wc->stage
7349 * to UPDATE_BACKREF previously while processing the block.
7351 * NOTE: return value 1 means we should stop walking up.
7353 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7354 struct btrfs_root *root,
7355 struct btrfs_path *path,
7356 struct walk_control *wc)
7359 int level = wc->level;
7360 struct extent_buffer *eb = path->nodes[level];
7363 if (wc->stage == UPDATE_BACKREF) {
7364 BUG_ON(wc->shared_level < level);
7365 if (level < wc->shared_level)
7368 ret = find_next_key(path, level + 1, &wc->update_progress);
7372 wc->stage = DROP_REFERENCE;
7373 wc->shared_level = -1;
7374 path->slots[level] = 0;
7377 * check reference count again if the block isn't locked.
7378 * we should start walking down the tree again if reference
7381 if (!path->locks[level]) {
7383 btrfs_tree_lock(eb);
7384 btrfs_set_lock_blocking(eb);
7385 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7387 ret = btrfs_lookup_extent_info(trans, root,
7388 eb->start, level, 1,
7392 btrfs_tree_unlock_rw(eb, path->locks[level]);
7393 path->locks[level] = 0;
7396 BUG_ON(wc->refs[level] == 0);
7397 if (wc->refs[level] == 1) {
7398 btrfs_tree_unlock_rw(eb, path->locks[level]);
7399 path->locks[level] = 0;
7405 /* wc->stage == DROP_REFERENCE */
7406 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7408 if (wc->refs[level] == 1) {
7410 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7411 ret = btrfs_dec_ref(trans, root, eb, 1,
7414 ret = btrfs_dec_ref(trans, root, eb, 0,
7416 BUG_ON(ret); /* -ENOMEM */
7418 /* make block locked assertion in clean_tree_block happy */
7419 if (!path->locks[level] &&
7420 btrfs_header_generation(eb) == trans->transid) {
7421 btrfs_tree_lock(eb);
7422 btrfs_set_lock_blocking(eb);
7423 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7425 clean_tree_block(trans, root, eb);
7428 if (eb == root->node) {
7429 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7432 BUG_ON(root->root_key.objectid !=
7433 btrfs_header_owner(eb));
7435 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7436 parent = path->nodes[level + 1]->start;
7438 BUG_ON(root->root_key.objectid !=
7439 btrfs_header_owner(path->nodes[level + 1]));
7442 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7444 wc->refs[level] = 0;
7445 wc->flags[level] = 0;
7449 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7450 struct btrfs_root *root,
7451 struct btrfs_path *path,
7452 struct walk_control *wc)
7454 int level = wc->level;
7455 int lookup_info = 1;
7458 while (level >= 0) {
7459 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7466 if (path->slots[level] >=
7467 btrfs_header_nritems(path->nodes[level]))
7470 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7472 path->slots[level]++;
7481 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7482 struct btrfs_root *root,
7483 struct btrfs_path *path,
7484 struct walk_control *wc, int max_level)
7486 int level = wc->level;
7489 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7490 while (level < max_level && path->nodes[level]) {
7492 if (path->slots[level] + 1 <
7493 btrfs_header_nritems(path->nodes[level])) {
7494 path->slots[level]++;
7497 ret = walk_up_proc(trans, root, path, wc);
7501 if (path->locks[level]) {
7502 btrfs_tree_unlock_rw(path->nodes[level],
7503 path->locks[level]);
7504 path->locks[level] = 0;
7506 free_extent_buffer(path->nodes[level]);
7507 path->nodes[level] = NULL;
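/*
 * btrfs_drop_snapshot() below drives the two walkers in alternation until
 * one of them signals completion. A minimal sketch of that loop with the
 * walkers reduced to hypothetical callbacks; error handling and transaction
 * throttling omitted, illustrative only:
 */
static inline int drop_walk_loop_sketch(int (*down)(void), int (*up)(void))
{
	for (;;) {
		int ret = down();
		if (ret < 0)
			return ret;
		ret = up();		/* >0 means we popped past the root */
		if (ret)
			return ret < 0 ? ret : 0;
	}
}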
7515 * drop a subvolume tree.
7517 * this function traverses the tree freeing any blocks that are only
7518 * referenced by the tree.
7520 * when a shared tree block is found, this function decreases its
7521 * reference count by one. if update_ref is true, this function
7522 * also makes sure backrefs for the shared block and all lower level
7523 * blocks are properly updated.
7525 * If called with for_reloc == 0, may exit early with -EAGAIN
7527 int btrfs_drop_snapshot(struct btrfs_root *root,
7528 struct btrfs_block_rsv *block_rsv, int update_ref,
7531 struct btrfs_path *path;
7532 struct btrfs_trans_handle *trans;
7533 struct btrfs_root *tree_root = root->fs_info->tree_root;
7534 struct btrfs_root_item *root_item = &root->root_item;
7535 struct walk_control *wc;
7536 struct btrfs_key key;
7540 bool root_dropped = false;
7542 path = btrfs_alloc_path();
7548 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7550 btrfs_free_path(path);
7555 trans = btrfs_start_transaction(tree_root, 0);
7556 if (IS_ERR(trans)) {
7557 err = PTR_ERR(trans);
7562 trans->block_rsv = block_rsv;
7564 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7565 level = btrfs_header_level(root->node);
7566 path->nodes[level] = btrfs_lock_root_node(root);
7567 btrfs_set_lock_blocking(path->nodes[level]);
7568 path->slots[level] = 0;
7569 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7570 memset(&wc->update_progress, 0,
7571 sizeof(wc->update_progress));
7573 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7574 memcpy(&wc->update_progress, &key,
7575 sizeof(wc->update_progress));
7577 level = root_item->drop_level;
7579 path->lowest_level = level;
7580 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7581 path->lowest_level = 0;
7589 * unlock our path, this is safe because only this
7590 * function is allowed to delete this snapshot
7592 btrfs_unlock_up_safe(path, 0);
7594 level = btrfs_header_level(root->node);
7596 btrfs_tree_lock(path->nodes[level]);
7597 btrfs_set_lock_blocking(path->nodes[level]);
7598 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7600 ret = btrfs_lookup_extent_info(trans, root,
7601 path->nodes[level]->start,
7602 level, 1, &wc->refs[level],
7608 BUG_ON(wc->refs[level] == 0);
7610 if (level == root_item->drop_level)
7613 btrfs_tree_unlock(path->nodes[level]);
7614 path->locks[level] = 0;
7615 WARN_ON(wc->refs[level] != 1);
7621 wc->shared_level = -1;
7622 wc->stage = DROP_REFERENCE;
7623 wc->update_ref = update_ref;
7625 wc->for_reloc = for_reloc;
7626 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7630 ret = walk_down_tree(trans, root, path, wc);
7636 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7643 BUG_ON(wc->stage != DROP_REFERENCE);
7647 if (wc->stage == DROP_REFERENCE) {
7649 btrfs_node_key(path->nodes[level],
7650 &root_item->drop_progress,
7651 path->slots[level]);
7652 root_item->drop_level = level;
7655 BUG_ON(wc->level == 0);
7656 if (btrfs_should_end_transaction(trans, tree_root) ||
7657 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7658 ret = btrfs_update_root(trans, tree_root,
7662 btrfs_abort_transaction(trans, tree_root, ret);
7667 btrfs_end_transaction_throttle(trans, tree_root);
7668 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7669 pr_debug("btrfs: drop snapshot early exit\n");
7674 trans = btrfs_start_transaction(tree_root, 0);
7675 if (IS_ERR(trans)) {
7676 err = PTR_ERR(trans);
7680 trans->block_rsv = block_rsv;
7683 btrfs_release_path(path);
7687 ret = btrfs_del_root(trans, tree_root, &root->root_key);
7689 btrfs_abort_transaction(trans, tree_root, ret);
7693 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7694 ret = btrfs_find_root(tree_root, &root->root_key, path,
7697 btrfs_abort_transaction(trans, tree_root, ret);
7700 } else if (ret > 0) {
7701 /* if we fail to delete the orphan item this time
7702 * around, it'll get picked up the next time.
7704 * The most common failure here is just -ENOENT.
7706 btrfs_del_orphan_item(trans, tree_root,
7707 root->root_key.objectid);
7711 if (root->in_radix) {
7712 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7714 free_extent_buffer(root->node);
7715 free_extent_buffer(root->commit_root);
7716 btrfs_put_fs_root(root);
7718 root_dropped = true;
7720 btrfs_end_transaction_throttle(trans, tree_root);
7723 btrfs_free_path(path);
7726 * So if we need to stop dropping the snapshot for whatever reason we
7727 * need to make sure to add it back to the dead root list so that we
7728 * keep trying to do the work later. This also cleans up roots if we
7729 * don't have it in the radix (like when we recover after a power fail
7730 * or unmount) so we don't leak memory.
7732 if (!for_reloc && root_dropped == false)
7733 btrfs_add_dead_root(root);
7735 btrfs_std_error(root->fs_info, err);
7740 * drop subtree rooted at tree block 'node'.
7742 * NOTE: this function will unlock and release tree block 'node'.
7743 * it is only used by the relocation code
7745 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7746 struct btrfs_root *root,
7747 struct extent_buffer *node,
7748 struct extent_buffer *parent)
7750 struct btrfs_path *path;
7751 struct walk_control *wc;
7757 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7759 path = btrfs_alloc_path();
7763 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7765 btrfs_free_path(path);
7769 btrfs_assert_tree_locked(parent);
7770 parent_level = btrfs_header_level(parent);
7771 extent_buffer_get(parent);
7772 path->nodes[parent_level] = parent;
7773 path->slots[parent_level] = btrfs_header_nritems(parent);
7775 btrfs_assert_tree_locked(node);
7776 level = btrfs_header_level(node);
7777 path->nodes[level] = node;
7778 path->slots[level] = 0;
7779 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7781 wc->refs[parent_level] = 1;
7782 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7784 wc->shared_level = -1;
7785 wc->stage = DROP_REFERENCE;
7789 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7792 wret = walk_down_tree(trans, root, path, wc);
7798 wret = walk_up_tree(trans, root, path, wc, parent_level);
7806 btrfs_free_path(path);
7810 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7816	 * if restripe for this chunk type is on, pick the target profile and
7817	 * return; otherwise do the usual balance
7819 stripped = get_restripe_target(root->fs_info, flags);
7821 return extended_to_chunk(stripped);
7824 * we add in the count of missing devices because we want
7825 * to make sure that any RAID levels on a degraded FS
7826 * continue to be honored.
7828 num_devices = root->fs_info->fs_devices->rw_devices +
7829 root->fs_info->fs_devices->missing_devices;
7831 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7832 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7833 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7835 if (num_devices == 1) {
7836 stripped |= BTRFS_BLOCK_GROUP_DUP;
7837 stripped = flags & ~stripped;
7839 /* turn raid0 into single device chunks */
7840 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7843 /* turn mirroring into duplication */
7844 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7845 BTRFS_BLOCK_GROUP_RAID10))
7846 return stripped | BTRFS_BLOCK_GROUP_DUP;
7848 /* they already had raid on here, just return */
7849 if (flags & stripped)
7852 stripped |= BTRFS_BLOCK_GROUP_DUP;
7853 stripped = flags & ~stripped;
7855 /* switch duplicated blocks with raid1 */
7856 if (flags & BTRFS_BLOCK_GROUP_DUP)
7857 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7859 /* this is drive concat, leave it alone */
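/*
 * A minimal sketch of the single-device fallback applied above when
 * num_devices == 1; "single" is represented by no profile bit at all.
 * Illustrative only:
 */
static inline u64 single_device_profile_sketch(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return 0;			/* raid0 -> single */
	if (flags & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		return BTRFS_BLOCK_GROUP_DUP;	/* mirroring -> duplication */
	return flags;				/* already fits one device */
}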
7865 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7867 struct btrfs_space_info *sinfo = cache->space_info;
7869 u64 min_allocable_bytes;
7874	 * We need some metadata space and system metadata space for
7875	 * allocating chunks in some corner cases, so keep a minimum of
7876	 * allocatable bytes unless we are forced to set the group readonly.
7879 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7881 min_allocable_bytes = 1 * 1024 * 1024;
7883 min_allocable_bytes = 0;
7885 spin_lock(&sinfo->lock);
7886 spin_lock(&cache->lock);
7893 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7894 cache->bytes_super - btrfs_block_group_used(&cache->item);
7896 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7897 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7898 min_allocable_bytes <= sinfo->total_bytes) {
7899 sinfo->bytes_readonly += num_bytes;
7904 spin_unlock(&cache->lock);
7905 spin_unlock(&sinfo->lock);
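/*
 * The read-only check above is pure accounting. A minimal sketch of the
 * same inequality over plain u64 inputs, where unused_in_group is the block
 * group's offset minus its reserved, pinned, super and used bytes;
 * illustrative only:
 */
static inline bool can_set_ro_sketch(u64 total, u64 used, u64 reserved,
				     u64 pinned, u64 may_use, u64 readonly,
				     u64 unused_in_group, u64 min_allocable)
{
	/* everything already accounted plus the group's slack must fit */
	return used + reserved + pinned + may_use + readonly +
	       unused_in_group + min_allocable <= total;
}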
7909 int btrfs_set_block_group_ro(struct btrfs_root *root,
7910 struct btrfs_block_group_cache *cache)
7913 struct btrfs_trans_handle *trans;
7919 trans = btrfs_join_transaction(root);
7921 return PTR_ERR(trans);
7923 alloc_flags = update_block_group_flags(root, cache->flags);
7924 if (alloc_flags != cache->flags) {
7925 ret = do_chunk_alloc(trans, root, alloc_flags,
7931 ret = set_block_group_ro(cache, 0);
7934 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7935 ret = do_chunk_alloc(trans, root, alloc_flags,
7939 ret = set_block_group_ro(cache, 0);
7941 btrfs_end_transaction(trans, root);
7945 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7946 struct btrfs_root *root, u64 type)
7948 u64 alloc_flags = get_alloc_profile(root, type);
7949 return do_chunk_alloc(trans, root, alloc_flags,
7954 * helper to account the unused space of all the readonly block groups in the
7955 * list. takes mirrors into account.
7957 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7959 struct btrfs_block_group_cache *block_group;
7963 list_for_each_entry(block_group, groups_list, list) {
7964 spin_lock(&block_group->lock);
7966 if (!block_group->ro) {
7967 spin_unlock(&block_group->lock);
7971 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7972 BTRFS_BLOCK_GROUP_RAID10 |
7973 BTRFS_BLOCK_GROUP_DUP))
7978 free_bytes += (block_group->key.offset -
7979 btrfs_block_group_used(&block_group->item)) *
7982 spin_unlock(&block_group->lock);
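/*
 * Mirrored profiles (RAID1/RAID10/DUP) store every byte twice, which is why
 * the helper above scales unused bytes by a factor. A worked example: a
 * 1GiB RAID1 group with 256MiB used reports (1024MiB - 256MiB) * 2 =
 * 1536MiB of raw space. Minimal sketch, illustrative only:
 */
static inline u64 ro_free_bytes_sketch(u64 group_size, u64 used, bool mirrored)
{
	return (group_size - used) * (mirrored ? 2 : 1);
}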
7989 * helper to account the unused space of all the readonly block groups in the
7990 * space_info. takes mirrors into account.
7992 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7997 spin_lock(&sinfo->lock);
7999 for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8000 if (!list_empty(&sinfo->block_groups[i]))
8001 free_bytes += __btrfs_get_ro_block_group_free_space(
8002 &sinfo->block_groups[i]);
8004 spin_unlock(&sinfo->lock);
8009 void btrfs_set_block_group_rw(struct btrfs_root *root,
8010 struct btrfs_block_group_cache *cache)
8012 struct btrfs_space_info *sinfo = cache->space_info;
8017 spin_lock(&sinfo->lock);
8018 spin_lock(&cache->lock);
8019 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8020 cache->bytes_super - btrfs_block_group_used(&cache->item);
8021 sinfo->bytes_readonly -= num_bytes;
8023 spin_unlock(&cache->lock);
8024 spin_unlock(&sinfo->lock);
8028 * checks to see if it's even possible to relocate this block group.
8030 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8031 * ok to go ahead and try.
8033 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8035 struct btrfs_block_group_cache *block_group;
8036 struct btrfs_space_info *space_info;
8037 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8038 struct btrfs_device *device;
8039 struct btrfs_trans_handle *trans;
8048 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8050 /* odd, couldn't find the block group, leave it alone */
8054 min_free = btrfs_block_group_used(&block_group->item);
8056 /* no bytes used, we're good */
8060 space_info = block_group->space_info;
8061 spin_lock(&space_info->lock);
8063 full = space_info->full;
8066 * if this is the last block group we have in this space, we can't
8067 * relocate it unless we're able to allocate a new chunk below.
8069 * Otherwise, we need to make sure we have room in the space to handle
8070	 * all of the extents from this block group. If we can, we're good.
8072 if ((space_info->total_bytes != block_group->key.offset) &&
8073 (space_info->bytes_used + space_info->bytes_reserved +
8074 space_info->bytes_pinned + space_info->bytes_readonly +
8075 min_free < space_info->total_bytes)) {
8076 spin_unlock(&space_info->lock);
8079 spin_unlock(&space_info->lock);
8082 * ok we don't have enough space, but maybe we have free space on our
8083 * devices to allocate new chunks for relocation, so loop through our
8084 * alloc devices and guess if we have enough space. if this block
8085 * group is going to be restriped, run checks against the target
8086 * profile instead of the current one.
8098 target = get_restripe_target(root->fs_info, block_group->flags);
8100 index = __get_raid_index(extended_to_chunk(target));
8103 * this is just a balance, so if we were marked as full
8104 * we know there is no space for a new chunk
8109 index = get_block_group_index(block_group);
8112 if (index == BTRFS_RAID_RAID10) {
8116 } else if (index == BTRFS_RAID_RAID1) {
8118 } else if (index == BTRFS_RAID_DUP) {
8121 } else if (index == BTRFS_RAID_RAID0) {
8122 dev_min = fs_devices->rw_devices;
8123 do_div(min_free, dev_min);
8126 /* We need to do this so that we can look at pending chunks */
8127 trans = btrfs_join_transaction(root);
8128 if (IS_ERR(trans)) {
8129 ret = PTR_ERR(trans);
8133 mutex_lock(&root->fs_info->chunk_mutex);
8134 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8138 * check to make sure we can actually find a chunk with enough
8139 * space to fit our block group in.
8141 if (device->total_bytes > device->bytes_used + min_free &&
8142 !device->is_tgtdev_for_dev_replace) {
8143 ret = find_free_dev_extent(trans, device, min_free,
8148 if (dev_nr >= dev_min)
8154 mutex_unlock(&root->fs_info->chunk_mutex);
8155 btrfs_end_transaction(trans, root);
8157 btrfs_put_block_group(block_group);
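/*
 * A minimal sketch of the per-profile device requirement used above: how
 * many devices must each offer min_free worth of space. The real code also
 * rescales min_free (halved for RAID10, doubled for DUP, divided across all
 * writable devices for RAID0). Illustrative only:
 */
static inline int dev_min_for_profile_sketch(int raid_index, int rw_devices)
{
	switch (raid_index) {
	case BTRFS_RAID_RAID10:
		return 4;
	case BTRFS_RAID_RAID1:
		return 2;
	case BTRFS_RAID_DUP:
		return 1;
	case BTRFS_RAID_RAID0:
		return rw_devices;
	default:
		return 1;
	}
}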
8161 static int find_first_block_group(struct btrfs_root *root,
8162 struct btrfs_path *path, struct btrfs_key *key)
8165 struct btrfs_key found_key;
8166 struct extent_buffer *leaf;
8169 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8174 slot = path->slots[0];
8175 leaf = path->nodes[0];
8176 if (slot >= btrfs_header_nritems(leaf)) {
8177 ret = btrfs_next_leaf(root, path);
8184 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8186 if (found_key.objectid >= key->objectid &&
8187 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8197 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8199 struct btrfs_block_group_cache *block_group;
8203 struct inode *inode;
8205 block_group = btrfs_lookup_first_block_group(info, last);
8206 while (block_group) {
8207 spin_lock(&block_group->lock);
8208 if (block_group->iref)
8210 spin_unlock(&block_group->lock);
8211 block_group = next_block_group(info->tree_root,
8221 inode = block_group->inode;
8222 block_group->iref = 0;
8223 block_group->inode = NULL;
8224 spin_unlock(&block_group->lock);
8226 last = block_group->key.objectid + block_group->key.offset;
8227 btrfs_put_block_group(block_group);
8231 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8233 struct btrfs_block_group_cache *block_group;
8234 struct btrfs_space_info *space_info;
8235 struct btrfs_caching_control *caching_ctl;
8238 down_write(&info->extent_commit_sem);
8239 while (!list_empty(&info->caching_block_groups)) {
8240 caching_ctl = list_entry(info->caching_block_groups.next,
8241 struct btrfs_caching_control, list);
8242 list_del(&caching_ctl->list);
8243 put_caching_control(caching_ctl);
8245 up_write(&info->extent_commit_sem);
8247 spin_lock(&info->block_group_cache_lock);
8248 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8249 block_group = rb_entry(n, struct btrfs_block_group_cache,
8251 rb_erase(&block_group->cache_node,
8252 &info->block_group_cache_tree);
8253 spin_unlock(&info->block_group_cache_lock);
8255 down_write(&block_group->space_info->groups_sem);
8256 list_del(&block_group->list);
8257 up_write(&block_group->space_info->groups_sem);
8259 if (block_group->cached == BTRFS_CACHE_STARTED)
8260 wait_block_group_cache_done(block_group);
8263 * We haven't cached this block group, which means we could
8264 * possibly have excluded extents on this block group.
8266 if (block_group->cached == BTRFS_CACHE_NO ||
8267 block_group->cached == BTRFS_CACHE_ERROR)
8268 free_excluded_extents(info->extent_root, block_group);
8270 btrfs_remove_free_space_cache(block_group);
8271 btrfs_put_block_group(block_group);
8273 spin_lock(&info->block_group_cache_lock);
8275 spin_unlock(&info->block_group_cache_lock);
8277 /* now that all the block groups are freed, go through and
8278 * free all the space_info structs. This is only called during
8279 * the final stages of unmount, and so we know nobody is
8280 * using them. We call synchronize_rcu() once before we start,
8281 * just to be on the safe side.
8285 release_global_block_rsv(info);
8287 while(!list_empty(&info->space_info)) {
8288 space_info = list_entry(info->space_info.next,
8289 struct btrfs_space_info,
8291 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8292 if (space_info->bytes_pinned > 0 ||
8293 space_info->bytes_reserved > 0 ||
8294 space_info->bytes_may_use > 0) {
8296 dump_space_info(space_info, 0, 0);
8299 percpu_counter_destroy(&space_info->total_bytes_pinned);
8300 list_del(&space_info->list);
8306 static void __link_block_group(struct btrfs_space_info *space_info,
8307 struct btrfs_block_group_cache *cache)
8309 int index = get_block_group_index(cache);
8311 down_write(&space_info->groups_sem);
8312 list_add_tail(&cache->list, &space_info->block_groups[index]);
8313 up_write(&space_info->groups_sem);
8316 int btrfs_read_block_groups(struct btrfs_root *root)
8318 struct btrfs_path *path;
8320 struct btrfs_block_group_cache *cache;
8321 struct btrfs_fs_info *info = root->fs_info;
8322 struct btrfs_space_info *space_info;
8323 struct btrfs_key key;
8324 struct btrfs_key found_key;
8325 struct extent_buffer *leaf;
8329 root = info->extent_root;
8332 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8333 path = btrfs_alloc_path();
8338 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8339 if (btrfs_test_opt(root, SPACE_CACHE) &&
8340 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8342 if (btrfs_test_opt(root, CLEAR_CACHE))
8346 ret = find_first_block_group(root, path, &key);
8351 leaf = path->nodes[0];
8352 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8353 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8358 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8360 if (!cache->free_space_ctl) {
8366 atomic_set(&cache->count, 1);
8367 spin_lock_init(&cache->lock);
8368 cache->fs_info = info;
8369 INIT_LIST_HEAD(&cache->list);
8370 INIT_LIST_HEAD(&cache->cluster_list);
8374		 * When we mount with an old space cache, we need to
8375		 * set BTRFS_DC_CLEAR and set the dirty flag.
8377		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8378		 *    truncate the old free space cache inode and set up a new one.
8380		 * b) Setting the 'dirty flag' makes sure that we flush
8381		 *    the new space cache info onto disk.
8383 cache->disk_cache_state = BTRFS_DC_CLEAR;
8384 if (btrfs_test_opt(root, SPACE_CACHE))
8388 read_extent_buffer(leaf, &cache->item,
8389 btrfs_item_ptr_offset(leaf, path->slots[0]),
8390 sizeof(cache->item));
8391 memcpy(&cache->key, &found_key, sizeof(found_key));
8393 key.objectid = found_key.objectid + found_key.offset;
8394 btrfs_release_path(path);
8395 cache->flags = btrfs_block_group_flags(&cache->item);
8396 cache->sectorsize = root->sectorsize;
8397 cache->full_stripe_len = btrfs_full_stripe_len(root,
8398 &root->fs_info->mapping_tree,
8399 found_key.objectid);
8400 btrfs_init_free_space_ctl(cache);
8403 * We need to exclude the super stripes now so that the space
8404 * info has super bytes accounted for, otherwise we'll think
8405 * we have more space than we actually do.
8407 ret = exclude_super_stripes(root, cache);
8410			 * We may have excluded something, so call this just in case.
8413 free_excluded_extents(root, cache);
8414 kfree(cache->free_space_ctl);
8420		 * check for two cases: either we are full, and therefore
8421		 * don't need to bother with the caching work since we won't
8422		 * find any space, or we are empty, and we can just add all
8423		 * the space in and be done with it. This saves us a lot of
8424 * time, particularly in the full case.
8426 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8427 cache->last_byte_to_unpin = (u64)-1;
8428 cache->cached = BTRFS_CACHE_FINISHED;
8429 free_excluded_extents(root, cache);
8430 } else if (btrfs_block_group_used(&cache->item) == 0) {
8431 cache->last_byte_to_unpin = (u64)-1;
8432 cache->cached = BTRFS_CACHE_FINISHED;
8433 add_new_free_space(cache, root->fs_info,
8435 found_key.objectid +
8437 free_excluded_extents(root, cache);
8440 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8442 btrfs_remove_free_space_cache(cache);
8443 btrfs_put_block_group(cache);
8447 ret = update_space_info(info, cache->flags, found_key.offset,
8448 btrfs_block_group_used(&cache->item),
8451 btrfs_remove_free_space_cache(cache);
8452 spin_lock(&info->block_group_cache_lock);
8453 rb_erase(&cache->cache_node,
8454 &info->block_group_cache_tree);
8455 spin_unlock(&info->block_group_cache_lock);
8456 btrfs_put_block_group(cache);
8460 cache->space_info = space_info;
8461 spin_lock(&cache->space_info->lock);
8462 cache->space_info->bytes_readonly += cache->bytes_super;
8463 spin_unlock(&cache->space_info->lock);
8465 __link_block_group(space_info, cache);
8467 set_avail_alloc_bits(root->fs_info, cache->flags);
8468 if (btrfs_chunk_readonly(root, cache->key.objectid))
8469 set_block_group_ro(cache, 1);
8472 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8473 if (!(get_alloc_profile(root, space_info->flags) &
8474 (BTRFS_BLOCK_GROUP_RAID10 |
8475 BTRFS_BLOCK_GROUP_RAID1 |
8476 BTRFS_BLOCK_GROUP_RAID5 |
8477 BTRFS_BLOCK_GROUP_RAID6 |
8478 BTRFS_BLOCK_GROUP_DUP)))
8481 * avoid allocating from un-mirrored block group if there are
8482 * mirrored block groups.
8484 list_for_each_entry(cache,
8485 &space_info->block_groups[BTRFS_RAID_RAID0],
8487 set_block_group_ro(cache, 1);
8488 list_for_each_entry(cache,
8489 &space_info->block_groups[BTRFS_RAID_SINGLE],
8491 set_block_group_ro(cache, 1);
8494 init_global_block_rsv(info);
8497 btrfs_free_path(path);
8501 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8502 struct btrfs_root *root)
8504 struct btrfs_block_group_cache *block_group, *tmp;
8505 struct btrfs_root *extent_root = root->fs_info->extent_root;
8506 struct btrfs_block_group_item item;
8507 struct btrfs_key key;
8510 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8512 list_del_init(&block_group->new_bg_list);
8517 spin_lock(&block_group->lock);
8518 memcpy(&item, &block_group->item, sizeof(item));
8519 memcpy(&key, &block_group->key, sizeof(key));
8520 spin_unlock(&block_group->lock);
8522 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8525 btrfs_abort_transaction(trans, extent_root, ret);
8526 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8527 key.objectid, key.offset);
8529 btrfs_abort_transaction(trans, extent_root, ret);
8533 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8534 struct btrfs_root *root, u64 bytes_used,
8535 u64 type, u64 chunk_objectid, u64 chunk_offset,
8539 struct btrfs_root *extent_root;
8540 struct btrfs_block_group_cache *cache;
8542 extent_root = root->fs_info->extent_root;
8544 root->fs_info->last_trans_log_full_commit = trans->transid;
8546 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8549 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8551 if (!cache->free_space_ctl) {
8556 cache->key.objectid = chunk_offset;
8557 cache->key.offset = size;
8558 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8559 cache->sectorsize = root->sectorsize;
8560 cache->fs_info = root->fs_info;
8561 cache->full_stripe_len = btrfs_full_stripe_len(root,
8562 &root->fs_info->mapping_tree,
8565 atomic_set(&cache->count, 1);
8566 spin_lock_init(&cache->lock);
8567 INIT_LIST_HEAD(&cache->list);
8568 INIT_LIST_HEAD(&cache->cluster_list);
8569 INIT_LIST_HEAD(&cache->new_bg_list);
8571 btrfs_init_free_space_ctl(cache);
8573 btrfs_set_block_group_used(&cache->item, bytes_used);
8574 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8575 cache->flags = type;
8576 btrfs_set_block_group_flags(&cache->item, type);
8578 cache->last_byte_to_unpin = (u64)-1;
8579 cache->cached = BTRFS_CACHE_FINISHED;
8580 ret = exclude_super_stripes(root, cache);
8583		 * We may have excluded something, so call this just in case.
8586 free_excluded_extents(root, cache);
8587 kfree(cache->free_space_ctl);
8592 add_new_free_space(cache, root->fs_info, chunk_offset,
8593 chunk_offset + size);
8595 free_excluded_extents(root, cache);
8597 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8599 btrfs_remove_free_space_cache(cache);
8600 btrfs_put_block_group(cache);
8604 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8605 &cache->space_info);
8607 btrfs_remove_free_space_cache(cache);
8608 spin_lock(&root->fs_info->block_group_cache_lock);
8609 rb_erase(&cache->cache_node,
8610 &root->fs_info->block_group_cache_tree);
8611 spin_unlock(&root->fs_info->block_group_cache_lock);
8612 btrfs_put_block_group(cache);
8615 update_global_block_rsv(root->fs_info);
8617 spin_lock(&cache->space_info->lock);
8618 cache->space_info->bytes_readonly += cache->bytes_super;
8619 spin_unlock(&cache->space_info->lock);
8621 __link_block_group(cache->space_info, cache);
8623 list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8625 set_avail_alloc_bits(extent_root->fs_info, type);
8630 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8632 u64 extra_flags = chunk_to_extended(flags) &
8633 BTRFS_EXTENDED_PROFILE_MASK;
8635 write_seqlock(&fs_info->profiles_lock);
8636 if (flags & BTRFS_BLOCK_GROUP_DATA)
8637 fs_info->avail_data_alloc_bits &= ~extra_flags;
8638 if (flags & BTRFS_BLOCK_GROUP_METADATA)
8639 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8640 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8641 fs_info->avail_system_alloc_bits &= ~extra_flags;
8642 write_sequnlock(&fs_info->profiles_lock);
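/*
 * A minimal sketch of the bit bookkeeping above, reduced to a single
 * availability word; the real code keeps one word per block group type
 * (data/metadata/system) and guards them with fs_info->profiles_lock.
 * Illustrative only:
 */
static inline u64 clear_profile_bits_sketch(u64 avail, u64 chunk_flags)
{
	u64 extra = chunk_to_extended(chunk_flags) &
		    BTRFS_EXTENDED_PROFILE_MASK;

	return avail & ~extra;
}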
8645 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8646 struct btrfs_root *root, u64 group_start)
8648 struct btrfs_path *path;
8649 struct btrfs_block_group_cache *block_group;
8650 struct btrfs_free_cluster *cluster;
8651 struct btrfs_root *tree_root = root->fs_info->tree_root;
8652 struct btrfs_key key;
8653 struct inode *inode;
8658 root = root->fs_info->extent_root;
8660 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8661 BUG_ON(!block_group);
8662 BUG_ON(!block_group->ro);
8665 * Free the reserved super bytes from this block group before
8668 free_excluded_extents(root, block_group);
8670 memcpy(&key, &block_group->key, sizeof(key));
8671 index = get_block_group_index(block_group);
8672 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8673 BTRFS_BLOCK_GROUP_RAID1 |
8674 BTRFS_BLOCK_GROUP_RAID10))
8679 /* make sure this block group isn't part of an allocation cluster */
8680 cluster = &root->fs_info->data_alloc_cluster;
8681 spin_lock(&cluster->refill_lock);
8682 btrfs_return_cluster_to_free_space(block_group, cluster);
8683 spin_unlock(&cluster->refill_lock);
8686 * make sure this block group isn't part of a metadata
8687 * allocation cluster
8689 cluster = &root->fs_info->meta_alloc_cluster;
8690 spin_lock(&cluster->refill_lock);
8691 btrfs_return_cluster_to_free_space(block_group, cluster);
8692 spin_unlock(&cluster->refill_lock);
8694 path = btrfs_alloc_path();
8700 inode = lookup_free_space_inode(tree_root, block_group, path);
8701 if (!IS_ERR(inode)) {
8702 ret = btrfs_orphan_add(trans, inode);
8704 btrfs_add_delayed_iput(inode);
8708		/* One for the block group's ref */
8709 spin_lock(&block_group->lock);
8710 if (block_group->iref) {
8711 block_group->iref = 0;
8712 block_group->inode = NULL;
8713 spin_unlock(&block_group->lock);
8716 spin_unlock(&block_group->lock);
8718 /* One for our lookup ref */
8719 btrfs_add_delayed_iput(inode);
8722 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8723 key.offset = block_group->key.objectid;
8726 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8730 btrfs_release_path(path);
8732 ret = btrfs_del_item(trans, tree_root, path);
8735 btrfs_release_path(path);
8738 spin_lock(&root->fs_info->block_group_cache_lock);
8739 rb_erase(&block_group->cache_node,
8740 &root->fs_info->block_group_cache_tree);
8742 if (root->fs_info->first_logical_byte == block_group->key.objectid)
8743 root->fs_info->first_logical_byte = (u64)-1;
8744 spin_unlock(&root->fs_info->block_group_cache_lock);
8746 down_write(&block_group->space_info->groups_sem);
8748 * we must use list_del_init so people can check to see if they
8749 * are still on the list after taking the semaphore
8751 list_del_init(&block_group->list);
8752 if (list_empty(&block_group->space_info->block_groups[index]))
8753 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8754 up_write(&block_group->space_info->groups_sem);
8756 if (block_group->cached == BTRFS_CACHE_STARTED)
8757 wait_block_group_cache_done(block_group);
8759 btrfs_remove_free_space_cache(block_group);
8761 spin_lock(&block_group->space_info->lock);
8762 block_group->space_info->total_bytes -= block_group->key.offset;
8763 block_group->space_info->bytes_readonly -= block_group->key.offset;
8764 block_group->space_info->disk_total -= block_group->key.offset * factor;
8765 spin_unlock(&block_group->space_info->lock);
8767 memcpy(&key, &block_group->key, sizeof(key));
8769 btrfs_clear_space_info_full(root->fs_info);
8771 btrfs_put_block_group(block_group);
8772 btrfs_put_block_group(block_group);
8774 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8780 ret = btrfs_del_item(trans, root, path);
8782 btrfs_free_path(path);
8786 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8788 struct btrfs_space_info *space_info;
8789 struct btrfs_super_block *disk_super;
8795 disk_super = fs_info->super_copy;
8796 if (!btrfs_super_root(disk_super))
8799 features = btrfs_super_incompat_flags(disk_super);
8800 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8803 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8804 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8809 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8810 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8812 flags = BTRFS_BLOCK_GROUP_METADATA;
8813 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8817 flags = BTRFS_BLOCK_GROUP_DATA;
8818 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8824 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8826 return unpin_extent_range(root, start, end);
8829 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8830 u64 num_bytes, u64 *actual_bytes)
8832 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
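/*
 * btrfs_trim_fs() below visits every block group overlapping the requested
 * range. A minimal sketch of the per-group clamping, with the min()/max()
 * spelled out; a group is skipped when the overlap is shorter than minlen.
 * Illustrative only:
 */
static inline bool trim_window_sketch(u64 req_start, u64 req_len,
				      u64 bg_start, u64 bg_len, u64 minlen,
				      u64 *start, u64 *end)
{
	*start = req_start > bg_start ? req_start : bg_start;
	*end = req_start + req_len < bg_start + bg_len ?
	       req_start + req_len : bg_start + bg_len;
	return *end > *start && *end - *start >= minlen;
}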
8835 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8837 struct btrfs_fs_info *fs_info = root->fs_info;
8838 struct btrfs_block_group_cache *cache = NULL;
8843 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8847	 * try to trim all FS space; the first block group may start at a non-zero offset.
8849 if (range->len == total_bytes)
8850 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8852 cache = btrfs_lookup_block_group(fs_info, range->start);
8855 if (cache->key.objectid >= (range->start + range->len)) {
8856 btrfs_put_block_group(cache);
8860 start = max(range->start, cache->key.objectid);
8861 end = min(range->start + range->len,
8862 cache->key.objectid + cache->key.offset);
8864 if (end - start >= range->minlen) {
8865 if (!block_group_cache_done(cache)) {
8866 ret = cache_block_group(cache, 0);
8868 btrfs_put_block_group(cache);
8871 ret = wait_block_group_cache_done(cache);
8873 btrfs_put_block_group(cache);
8877 ret = btrfs_trim_block_group(cache,
8883 trimmed += group_trimmed;
8885 btrfs_put_block_group(cache);
8890 cache = next_block_group(fs_info->tree_root, cache);
8893 range->len = trimmed;