Merge branch 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[cascardo/linux.git] / fs / btrfs / file.c
index 8d7b5a4..2234e88 100644 (file)
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                reserve_bytes = round_up(write_bytes + sector_offset,
                                root->sectorsize);
 
-               if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-                                             BTRFS_INODE_PREALLOC)) &&
-                   check_can_nocow(inode, pos, &write_bytes) > 0) {
-                       /*
-                        * For nodata cow case, no need to reserve
-                        * data space.
-                        */
-                       only_release_metadata = true;
-                       /*
-                        * our prealloc extent may be smaller than
-                        * write_bytes, so scale down.
-                        */
-                       num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                                PAGE_SIZE);
-                       reserve_bytes = round_up(write_bytes + sector_offset,
-                                       root->sectorsize);
-                       goto reserve_metadata;
-               }
-
                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-               if (ret < 0)
-                       break;
+               if (ret < 0) {
+                       if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+                                                     BTRFS_INODE_PREALLOC)) &&
+                           check_can_nocow(inode, pos, &write_bytes) > 0) {
+                               /*
+                                * For nodata cow case, no need to reserve
+                                * data space.
+                                */
+                               only_release_metadata = true;
+                               /*
+                                * our prealloc extent may be smaller than
+                                * write_bytes, so scale down.
+                                */
+                               num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                        PAGE_SIZE);
+                               reserve_bytes = round_up(write_bytes +
+                                                        sector_offset,
+                                                        root->sectorsize);
+                       } else {
+                               break;
+                       }
+               }
 
-reserve_metadata:
                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
@@ -1596,6 +1596,13 @@ again:
 
                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
 
+               num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               reserve_bytes);
+               dirty_sectors = round_up(copied + sector_offset,
+                                       root->sectorsize);
+               dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               dirty_sectors);
+
                /*
                 * if we have trouble faulting in the pages, fall
                 * back to one page at a time
@@ -1605,6 +1612,7 @@ again:
 
                if (copied == 0) {
                        force_page_uptodate = true;
+                       dirty_sectors = 0;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
@@ -1615,20 +1623,19 @@ again:
                /*
                 * If we had a short copy we need to release the excess delalloc
                 * bytes we reserved.  We need to increment outstanding_extents
-                * because btrfs_delalloc_release_space will decrement it, but
+                * because btrfs_delalloc_release_space and
+                * btrfs_delalloc_release_metadata will decrement it, but
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
-               num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-                                               reserve_bytes);
-               dirty_sectors = round_up(copied + sector_offset,
-                                       root->sectorsize);
-               dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-                                               dirty_sectors);
-
                if (num_sectors > dirty_sectors) {
-                       release_bytes = (write_bytes - copied)
-                               & ~((u64)root->sectorsize - 1);
+                       /*
+                        * we round down because we don't want to count
+                        * any partial blocks actually sent through the
+                        * IO machines
+                        */
+                       release_bytes = round_down(release_bytes - copied,
+                                     root->sectorsize);
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
@@ -1696,25 +1703,26 @@ again:
                        btrfs_end_write_no_snapshoting(root);
                        btrfs_delalloc_release_metadata(inode, release_bytes);
                } else {
-                       btrfs_delalloc_release_space(inode, pos, release_bytes);
+                       btrfs_delalloc_release_space(inode,
+                                               round_down(pos, root->sectorsize),
+                                               release_bytes);
                }
        }
 
        return num_written ? num_written : ret;
 }
 
-static ssize_t __btrfs_direct_write(struct kiocb *iocb,
-                                   struct iov_iter *from,
-                                   loff_t pos)
+static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
+       loff_t pos = iocb->ki_pos;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;
 
-       written = generic_file_direct_write(iocb, from, pos);
+       written = generic_file_direct_write(iocb, from);
 
        if (written < 0 || !iov_iter_count(from))
                return written;
@@ -1832,7 +1840,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
                atomic_inc(&BTRFS_I(inode)->sync_writers);
 
        if (iocb->ki_flags & IOCB_DIRECT) {
-               num_written = __btrfs_direct_write(iocb, from, pos);
+               num_written = __btrfs_direct_write(iocb, from);
        } else {
                num_written = __btrfs_buffered_write(file, from, pos);
                if (num_written > 0)
@@ -1852,11 +1860,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        spin_lock(&BTRFS_I(inode)->lock);
        BTRFS_I(inode)->last_sub_trans = root->log_transid;
        spin_unlock(&BTRFS_I(inode)->lock);
-       if (num_written > 0) {
-               err = generic_write_sync(file, pos, num_written);
-               if (err < 0)
-                       num_written = err;
-       }
+       if (num_written > 0)
+               num_written = generic_write_sync(iocb, num_written);
 
        if (sync)
                atomic_dec(&BTRFS_I(inode)->sync_writers);
@@ -2024,7 +2029,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
             BTRFS_I(inode)->last_trans
             <= root->fs_info->last_trans_committed)) {
                /*
-                * We'v had everything committed since the last time we were
+                * We've had everything committed since the last time we were
                 * modified so clear this flag in case it was set for whatever
                 * reason, it's no longer relevant.
                 */
@@ -2372,7 +2377,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
        /* Check the aligned pages after the first unaligned page,
         * if offset != orig_start, which means the first unaligned page
-        * including serveral following pages are already in holes,
+        * including several following pages are already in holes,
         * the extra check can be skipped */
        if (offset == orig_start) {
                /* after truncate page, check hole again */
@@ -2956,7 +2961,7 @@ const struct file_operations btrfs_file_operations = {
        .fallocate      = btrfs_fallocate,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
-       .compat_ioctl   = btrfs_ioctl,
+       .compat_ioctl   = btrfs_compat_ioctl,
 #endif
        .copy_file_range = btrfs_copy_file_range,
        .clone_file_range = btrfs_clone_file_range,