Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
author		Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 1 Nov 2014 17:41:26 +0000 (10:41 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 1 Nov 2014 17:41:26 +0000 (10:41 -0700)
Pull btrfs fixes from Chris Mason:
 "Filipe is nailing down some problems with our skinny extent variation,
  and Dave's patch fixes endian problems in the new super block checks"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix race that makes btrfs_lookup_extent_info miss skinny extent items
  Btrfs: properly clean up btrfs_end_io_wq_cache
  Btrfs: fix invalid leaf slot access in btrfs_lookup_extent()
  btrfs: use macro accessors in superblock validation checks

fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/tree-log.c
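
The endian problem Dave's patch addresses shows up in the disk-io.c hunk below:
btrfs_check_super_valid() compared raw superblock fields, which are stored
little-endian on disk, instead of going through the btrfs_super_*() accessors
that byte-swap them. A minimal userspace sketch of the same pattern, with
made-up names rather than the real btrfs structures:

    /*
     * Illustration only: why validation checks must not read raw
     * little-endian on-disk fields.  The struct and names below are made up
     * for the example; btrfs generates the equivalent accessors
     * (btrfs_super_root() and friends) from macros.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>                 /* htole64()/le64toh(), glibc */

    struct demo_super {
            uint64_t root;              /* stored little-endian on disk */
    };

    static uint64_t demo_super_root(const struct demo_super *sb)
    {
            return le64toh(sb->root);   /* accessor does the byte swap */
    }

    int main(void)
    {
            struct demo_super sb = { .root = htole64(65536) };

            /*
             * Reading sb.root directly only happens to work on little-endian
             * hosts; on a big-endian machine the raw value comes back
             * byte-swapped and any alignment or range check on it misfires.
             */
            printf("raw=%llu accessor=%llu\n",
                   (unsigned long long)sb.root,
                   (unsigned long long)demo_super_root(&sb));
            return 0;
    }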

diff --combined fs/btrfs/disk-io.c
@@@ -1194,7 -1194,7 +1194,7 @@@ static struct btrfs_subvolume_writers *
        if (!writers)
                return ERR_PTR(-ENOMEM);
  
 -      ret = percpu_counter_init(&writers->counter, 0);
 +      ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(writers);
                return ERR_PTR(ret);
@@@ -1702,7 -1702,7 +1702,7 @@@ static int btrfs_congested_fn(void *con
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
 -              if (bdi && bdi_congested(bdi, bdi_bits)) {
 +              if (bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
@@@ -2187,7 -2187,7 +2187,7 @@@ int open_ctree(struct super_block *sb
                goto fail_srcu;
        }
  
 -      ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
 +      ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
        if (ret) {
                err = ret;
                goto fail_bdi;
        fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
                                        (1 + ilog2(nr_cpu_ids));
  
 -      ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
 +      ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
        if (ret) {
                err = ret;
                goto fail_dirty_metadata_bytes;
        }
  
 -      ret = percpu_counter_init(&fs_info->bio_counter, 0);
 +      ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
        if (ret) {
                err = ret;
                goto fail_delalloc_bytes;
@@@ -3817,19 -3817,19 +3817,19 @@@ static int btrfs_check_super_valid(stru
        struct btrfs_super_block *sb = fs_info->super_copy;
        int ret = 0;
  
-       if (sb->root_level > BTRFS_MAX_LEVEL) {
-               printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n",
-                               sb->root_level, BTRFS_MAX_LEVEL);
+       if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
+               printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
+                               btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
                ret = -EINVAL;
        }
-       if (sb->chunk_root_level > BTRFS_MAX_LEVEL) {
-               printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n",
-                               sb->chunk_root_level, BTRFS_MAX_LEVEL);
+       if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
+               printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
+                               btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
                ret = -EINVAL;
        }
-       if (sb->log_root_level > BTRFS_MAX_LEVEL) {
-               printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n",
-                               sb->log_root_level, BTRFS_MAX_LEVEL);
+       if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
+               printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
+                               btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
                ret = -EINVAL;
        }
  
         * The common minimum, we don't know if we can trust the nodesize/sectorsize
         * items yet, they'll be verified later. Issue just a warning.
         */
-       if (!IS_ALIGNED(sb->root, 4096))
+       if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
                printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
                                sb->root);
-       if (!IS_ALIGNED(sb->chunk_root, 4096))
+       if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
                printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
                                sb->chunk_root);
-       if (!IS_ALIGNED(sb->log_root, 4096))
+       if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
                printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-                               sb->log_root);
+                               btrfs_super_log_root(sb));
  
        if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
                printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
         * Hint to catch really bogus numbers, bitflips or so, more exact checks are
         * done later
         */
-       if (sb->num_devices > (1UL << 31))
+       if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
-                               sb->num_devices);
+                               btrfs_super_num_devices(sb));
  
-       if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) {
+       if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
                printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
-                               sb->bytenr, BTRFS_SUPER_INFO_OFFSET);
+                               btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
                ret = -EINVAL;
        }
  
         * The generation is a global counter, we'll trust it more than the others
         * but it's still possible that it's the one that's wrong.
         */
-       if (sb->generation < sb->chunk_root_generation)
+       if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
                printk(KERN_WARNING
                        "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
-                       sb->generation, sb->chunk_root_generation);
-       if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1)
+                       btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
+       if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
+           && btrfs_super_cache_generation(sb) != (u64)-1)
                printk(KERN_WARNING
                        "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
-                       sb->generation, sb->cache_generation);
+                       btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
  
        return ret;
  }
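
The percpu_counter_init() hunks above reflect the v3.18 API change where the
function gained a gfp_t argument for its internal allocation. A minimal
kernel-module sketch of the updated call pattern (the demo_* names are
invented for illustration):

    #include <linux/module.h>
    #include <linux/percpu_counter.h>

    static struct percpu_counter demo_counter;

    static int __init demo_init(void)
    {
            /*
             * The third argument is new: callers state the allocation
             * context explicitly; open_ctree() runs in process context,
             * hence GFP_KERNEL.
             */
            return percpu_counter_init(&demo_counter, 0, GFP_KERNEL);
    }

    static void __exit demo_exit(void)
    {
            percpu_counter_destroy(&demo_counter);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
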
diff --combined fs/btrfs/extent-tree.c
@@@ -710,8 -710,8 +710,8 @@@ void btrfs_clear_space_info_full(struc
        rcu_read_unlock();
  }
  
- /* simple helper to search for an existing extent at a given offset */
- int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
+ /* simple helper to search for an existing data extent at a given offset */
+ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
  {
        int ret;
        struct btrfs_key key;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
-       if (ret > 0) {
-               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-               if (key.objectid == start &&
-                   key.type == BTRFS_METADATA_ITEM_KEY)
-                       ret = 0;
-       }
        btrfs_free_path(path);
        return ret;
  }
@@@ -786,7 -780,6 +780,6 @@@ search_again
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;
  
- again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                            key.offset == root->nodesize)
                                ret = 0;
                }
-               if (ret) {
-                       key.objectid = bytenr;
-                       key.type = BTRFS_EXTENT_ITEM_KEY;
-                       key.offset = root->nodesize;
-                       btrfs_release_path(path);
-                       goto again;
-               }
        }
  
        if (ret == 0) {
@@@ -3494,7 -3480,7 +3480,7 @@@ static int update_space_info(struct btr
        if (!found)
                return -ENOMEM;
  
 -      ret = percpu_counter_init(&found->total_bytes_pinned, 0);
 +      ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
        if (ret) {
                kfree(found);
                return ret;
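
For background on the btrfs_lookup_data_extent() hunk above: with the
skinny-metadata feature, tree-block extents are keyed as
(bytenr, METADATA_ITEM_KEY, level) rather than
(bytenr, EXTENT_ITEM_KEY, nodesize), so a helper that is only used for data
extents can search for the EXTENT_ITEM key directly and drop the
metadata-item fallback, which the rename captures. A standalone sketch of the
two key shapes; the struct below only models the (objectid, type, offset)
triple and is not the kernel definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of a btrfs key: (objectid, type, offset). */
    struct demo_key {
            uint64_t objectid;
            uint8_t  type;
            uint64_t offset;
    };

    #define DEMO_EXTENT_ITEM_KEY   168  /* extent item, offset = length   */
    #define DEMO_METADATA_ITEM_KEY 169  /* skinny metadata, offset = level */

    int main(void)
    {
            uint64_t bytenr = 30408704, nodesize = 16384;

            /* Regular (non-skinny) tree-block extent item. */
            struct demo_key fat    = { bytenr, DEMO_EXTENT_ITEM_KEY, nodesize };
            /* Skinny variant: same bytenr, but offset stores the tree level. */
            struct demo_key skinny = { bytenr, DEMO_METADATA_ITEM_KEY, 0 };

            printf("fat:    (%llu, %u, %llu)\n",
                   (unsigned long long)fat.objectid, (unsigned)fat.type,
                   (unsigned long long)fat.offset);
            printf("skinny: (%llu, %u, %llu)\n",
                   (unsigned long long)skinny.objectid, (unsigned)skinny.type,
                   (unsigned long long)skinny.offset);
            return 0;
    }
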
diff --combined fs/btrfs/tree-log.c
@@@ -672,7 -672,7 +672,7 @@@ static noinline int replay_one_extent(s
                         * is this extent already allocated in the extent
                         * allocation tree?  If so, just add a reference
                         */
-                       ret = btrfs_lookup_extent(root, ins.objectid,
+                       ret = btrfs_lookup_data_extent(root, ins.objectid,
                                                ins.offset);
                        if (ret == 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
@@@ -4035,8 -4035,7 +4035,8 @@@ again
                if (ret < 0) {
                        err = ret;
                        goto out_unlock;
 -              } if (ret) {
 +              }
 +              if (ret) {
                        ins_nr = 0;
                        btrfs_release_path(path);
                        continue;
@@@ -4135,8 -4134,18 +4135,8 @@@ log_extents
                }
        }
  
 -      write_lock(&em_tree->lock);
 -      /*
 -       * If we're doing a ranged fsync and there are still modified extents
 -       * in the list, we must run on the next fsync call as it might cover
 -       * those extents (a full fsync or an fsync for other range).
 -       */
 -      if (list_empty(&em_tree->modified_extents)) {
 -              BTRFS_I(inode)->logged_trans = trans->transid;
 -              BTRFS_I(inode)->last_log_commit =
 -                      BTRFS_I(inode)->last_sub_trans;
 -      }
 -      write_unlock(&em_tree->lock);
 +      BTRFS_I(inode)->logged_trans = trans->transid;
 +      BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
  out_unlock:
        if (unlikely(err))
                btrfs_put_logged_extents(&logged_list);