btrfs: prefix fsid to all trace events
[cascardo/linux.git] fs/btrfs/extent-tree.c
index a0ca2b5..73f6dd2 100644
@@ -2204,7 +2204,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
        ins.type = BTRFS_EXTENT_ITEM_KEY;
 
        ref = btrfs_delayed_node_to_data_ref(node);
-       trace_run_delayed_data_ref(node, ref, node->action);
+       trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
 
        if (node->type == BTRFS_SHARED_DATA_REF_KEY)
                parent = ref->parent;
@@ -2359,7 +2359,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                                 SKINNY_METADATA);
 
        ref = btrfs_delayed_node_to_tree_ref(node);
-       trace_run_delayed_tree_ref(node, ref, node->action);
+       trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
 
        if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
                parent = ref->parent;
@@ -2423,7 +2423,8 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                 */
                BUG_ON(extent_op);
                head = btrfs_delayed_node_to_head(node);
-               trace_run_delayed_ref_head(node, head, node->action);
+               trace_run_delayed_ref_head(root->fs_info, node, head,
+                                          node->action);
 
                if (insert_reserved) {
                        btrfs_pin_extent(root, node->bytenr,
@@ -4936,7 +4937,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 }
 
 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
-                                       struct btrfs_fs_info *fs_info, u64 used)
+                                       struct btrfs_root *root, u64 used)
 {
        u64 thresh = div_factor_fine(space_info->total_bytes, 98);
 
@@ -4944,11 +4945,12 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
        if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
                return 0;
 
-       if (!btrfs_calc_reclaim_metadata_size(fs_info->fs_root, space_info))
+       if (!btrfs_calc_reclaim_metadata_size(root, space_info))
                return 0;
 
-       return (used >= thresh && !btrfs_fs_closing(fs_info) &&
-               !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
+       return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
+               !test_bit(BTRFS_FS_STATE_REMOUNTING,
+                         &root->fs_info->fs_state));
 }
 
 static void wake_all_tickets(struct list_head *head)
@@ -5134,6 +5136,8 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
        int ret = 0;
 
        ASSERT(orig_bytes);
+       ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
+
        spin_lock(&space_info->lock);
        ret = -ENOSPC;
        used = space_info->bytes_used + space_info->bytes_reserved +
@@ -5193,7 +5197,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
                 * the async reclaim as we will panic.
                 */
                if (!root->fs_info->log_root_recovering &&
-                   need_do_async_reclaim(space_info, root->fs_info, used) &&
+                   need_do_async_reclaim(space_info, root, used) &&
                    !work_busy(&root->fs_info->async_reclaim_work)) {
                        trace_btrfs_trigger_flush(root->fs_info,
                                                  space_info->flags,
@@ -5614,48 +5618,21 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
                                num_bytes);
 }
 
-/*
- * helper to calculate size of global block reservation.
- * the desired value is sum of space used by extent tree,
- * checksum tree and root tree
- */
-static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
-{
-       struct btrfs_space_info *sinfo;
-       u64 num_bytes;
-       u64 meta_used;
-       u64 data_used;
-       int csum_size = btrfs_super_csum_size(fs_info->super_copy);
-
-       sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
-       spin_lock(&sinfo->lock);
-       data_used = sinfo->bytes_used;
-       spin_unlock(&sinfo->lock);
-
-       sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
-       spin_lock(&sinfo->lock);
-       if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
-               data_used = 0;
-       meta_used = sinfo->bytes_used;
-       spin_unlock(&sinfo->lock);
-
-       num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
-                   csum_size * 2;
-       num_bytes += div_u64(data_used + meta_used, 50);
-
-       if (num_bytes * 3 > meta_used)
-               num_bytes = div_u64(meta_used, 3);
-
-       return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
-}
-
 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 {
        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
        struct btrfs_space_info *sinfo = block_rsv->space_info;
        u64 num_bytes;
 
-       num_bytes = calc_global_metadata_size(fs_info);
+       /*
+        * The global block rsv is based on the size of the extent tree, the
+        * checksum tree and the root tree.  If the fs is empty we want to set
+        * it to a minimal amount for safety.
+        */
+       num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
+               btrfs_root_used(&fs_info->csum_root->root_item) +
+               btrfs_root_used(&fs_info->tree_root->root_item);
+       num_bytes = max_t(u64, num_bytes, SZ_16M);
 
        spin_lock(&sinfo->lock);
        spin_lock(&block_rsv->lock);
@@ -5770,7 +5747,13 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
                                  struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
+       /*
+        * We always use trans->block_rsv here as we will have reserved space
+        * for our orphan when starting the transaction, using get_block_rsv()
+        * here will sometimes make us choose the wrong block rsv as we could be
+        * doing a reloc inode for a non refcounted root.
+        */
+       struct btrfs_block_rsv *src_rsv = trans->block_rsv;
        struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
 
        /*
@@ -5957,10 +5940,15 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         * the middle of a transaction commit.  We also don't need the delalloc
         * mutex since we won't race with anybody.  We need this mostly to make
         * lockdep shut its filthy mouth.
+        *
+        * If we have a transaction open (can happen if we call truncate_block
+        * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
         */
        if (btrfs_is_free_space_inode(inode)) {
                flush = BTRFS_RESERVE_NO_FLUSH;
                delalloc_lock = false;
+       } else if (current->journal_info) {
+               flush = BTRFS_RESERVE_FLUSH_LIMIT;
        }
 
        if (flush != BTRFS_RESERVE_NO_FLUSH &&
@@ -7919,8 +7907,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
        printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
               info->flags,
               info->total_bytes - info->bytes_used - info->bytes_pinned -
-              info->bytes_reserved - info->bytes_readonly,
-              (info->full) ? "" : "not ");
+              info->bytes_reserved - info->bytes_readonly -
+              info->bytes_may_use, (info->full) ? "" : "not ");
        printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
               "reserved=%llu, may_use=%llu, readonly=%llu\n",
               info->total_bytes, info->bytes_used, info->bytes_pinned,
@@ -8367,6 +8355,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                                                 SKINNY_METADATA);
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (btrfs_test_is_dummy_root(root)) {
                buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
                                            level);
@@ -8374,6 +8363,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                        root->alloc_bytenr += blocksize;
                return buf;
        }
+#endif
 
        block_rsv = use_block_rsv(trans, root, blocksize);
        if (IS_ERR(block_rsv))
@@ -8553,7 +8543,8 @@ static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
 
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
-       if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
+       if (btrfs_qgroup_insert_dirty_extent(trans->root->fs_info,
+                                            delayed_refs, qrecord))
                kfree(qrecord);
        spin_unlock(&delayed_refs->lock);
 
@@ -9900,7 +9891,22 @@ static int find_first_block_group(struct btrfs_root *root,
 
                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
-                       ret = 0;
+                       struct extent_map_tree *em_tree;
+                       struct extent_map *em;
+
+                       em_tree = &root->fs_info->mapping_tree.map_tree;
+                       read_lock(&em_tree->lock);
+                       em = lookup_extent_mapping(em_tree, found_key.objectid,
+                                                  found_key.offset);
+                       read_unlock(&em_tree->lock);
+                       if (!em) {
+                               btrfs_err(root->fs_info,
+                       "logical %llu len %llu found bg but no related chunk",
+                                         found_key.objectid, found_key.offset);
+                               ret = -ENOENT;
+                       } else {
+                               ret = 0;
+                       }
                        goto out;
                }
                path->slots[0]++;