data = *to;
ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
xfs_get_blocks_direct, NULL, NULL, 0);
- if (ret > 0) {
+ if (ret >= 0) {
iocb->ki_pos += ret;
iov_iter_advance(to, ret);
}
return ret;
}
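
For context, a hypothetical userspace sketch of an O_DIRECT read that ends up in the direct read path above (the file path and the 4096-byte alignment are illustrative assumptions; the real alignment should be queried from the device). With the `ret >= 0` change, a zero-byte result at EOF takes the same bookkeeping branch as a short read:

/* Hypothetical sketch, not part of the patch: O_DIRECT read. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	ssize_t ret;
	int fd = open("/mnt/xfs/testfile", O_RDONLY | O_DIRECT); /* illustrative */

	if (fd < 0)
		return 1;
	/* O_DIRECT needs aligned buffer, offset and length; 4096 is assumed. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	ret = pread(fd, buf, 4096, 0);
	if (ret >= 0)	/* zero and short reads are normal at EOF */
		printf("read %zd bytes\n", ret);
	free(buf);
	close(fd);
	return ret < 0;
}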
-STATIC ssize_t
-xfs_file_splice_read(
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t count,
- unsigned int flags)
-{
- struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
- ssize_t ret;
-
- XFS_STATS_INC(ip->i_mount, xs_read_calls);
-
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- trace_xfs_file_splice_read(ip, count, *ppos);
-
- /*
- * DAX inodes cannot use the page cache for splice, so we have to push
- * them through the VFS IO path. This means it goes through
- * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
- * cannot lock the splice operation at this level for DAX inodes.
- */
- if (IS_DAX(VFS_I(ip))) {
- ret = default_file_splice_read(infilp, ppos, pipe, count,
- flags);
- goto out;
- }
-
- xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
- ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-out:
- if (ret > 0)
- XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
- return ret;
-}
-
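The wrapper above is removable because the iter-based splice rework makes generic_file_splice_read() feed a pipe-backed iov_iter to ->read_iter, so xfs_file_read_iter() already takes XFS_IOLOCK_SHARED and handles the DAX case; ->splice_read can point at generic_file_splice_read directly (see the file_operations change below). A hypothetical userspace sketch of the splice path this now serves (file path is illustrative):

/* Hypothetical sketch: splice file data into a pipe, driving
 * ->splice_read, which now resolves to generic_file_splice_read(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int fd = open("/mnt/xfs/testfile", O_RDONLY); /* illustrative */
	ssize_t ret;

	if (fd < 0 || pipe(pipefd))
		return 1;
	/* Move up to 64k into the pipe without copying through userspace;
	 * the data is pulled in via ->read_iter under XFS_IOLOCK_SHARED. */
	ret = splice(fd, NULL, pipefd[1], NULL, 65536, 0);
	close(fd);
	close(pipefd[0]);
	close(pipefd[1]);
	return ret < 0;
}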
/*
* Zero any on disk space between the current EOF and the new, larger EOF.
*
enospc = xfs_inode_free_quota_eofblocks(ip);
if (enospc)
goto write_retry;
+ enospc = xfs_inode_free_quota_cowblocks(ip);
+ if (enospc)
+ goto write_retry;
} else if (ret == -ENOSPC && !enospc) {
struct xfs_eofblocks eofb = {0};
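
The retry path now also reclaims copy-on-write fork preallocations (cowblocks) charged to the same quotas before giving up, mirroring the existing EOF-blocks scan. Such preallocations appear once extents are shared; a hypothetical sketch (paths illustrative, FICLONE assumed available on a reflink-capable filesystem) of sharing extents and then dirtying them, which is what leaves CoW blocks behind:

/* Hypothetical sketch: reflink a file, then overwrite part of it so
 * the filesystem must set up copy-on-write allocations. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	int src = open("/mnt/xfs/src", O_RDONLY);               /* illustrative */
	int dst = open("/mnt/xfs/dst", O_RDWR | O_CREAT, 0644); /* same fs */
	char buf[4096] = { 0 };

	if (src < 0 || dst < 0)
		return 1;
	if (ioctl(dst, FICLONE, src))	/* share all extents with src */
		return 1;
	/* Overwriting a shared range triggers CoW; leftover CoW
	 * reservations are what the retry path above can reclaim. */
	if (pwrite(dst, buf, sizeof(buf), 0) < 0)
		return 1;
	close(src);
	close(dst);
	return 0;
}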
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE)
+ FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
STATIC long
xfs_file_fallocate(
if (mode & FALLOC_FL_ZERO_RANGE)
error = xfs_zero_file_space(ip, offset, len);
- else
+ else {
+ if (mode & FALLOC_FL_UNSHARE_RANGE) {
+ error = xfs_reflink_unshare(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
error = xfs_alloc_file_space(ip, offset, len,
XFS_BMAPI_PREALLOC);
+ }
if (error)
goto out_unlock;
}
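
FALLOC_FL_UNSHARE_RANGE lets an application break extent sharing up front so that later overwrites cannot fail with ENOSPC mid-write; per the hunk above, XFS unshares the range and then preallocates it. A hypothetical userspace sketch (path illustrative):

/* Hypothetical sketch: unshare and allocate a range so future
 * overwrites need no copy-on-write allocation. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/dst", O_RDWR); /* illustrative */

	if (fd < 0)
		return 1;
	/* Break any extent sharing in the first megabyte. */
	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, 1 << 20))
		return 1;
	close(fd);
	return 0;
}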
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = xfs_setattr_size(ip, &iattr);
+ error = xfs_vn_setattr_size(file_dentry(file), &iattr);
if (error)
goto out_unlock;
}
if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
return -EINVAL;
+ /* Don't share DAX file data for now. */
+ if (IS_DAX(inode_in) || IS_DAX(inode_out))
+ return -EINVAL;
+
/* Are we going all the way to the end? */
isize = i_size_read(inode_in);
if (isize == 0)
/* Wait for the completion of any pending IOs on srcfile */
ret = xfs_file_wait_for_io(inode_in, pos_in, len);
if (ret)
- goto out_unlock;
+ goto out;
ret = xfs_file_wait_for_io(inode_out, pos_out, len);
if (ret)
- goto out_unlock;
+ goto out;
if (is_dedupe)
flags |= XFS_REFLINK_DEDUPE;
ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
pos_out, len, flags);
if (ret < 0)
- goto out_unlock;
+ goto out;
-out_unlock:
+out:
return ret;
}
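
Nothing is unlocked at the exit label any more, so `out` is the honest name. From userspace this path is reached via copy_file_range(2) and the clone ioctls; a hypothetical sketch (paths illustrative, glibc 2.27+ assumed for the copy_file_range wrapper):

/* Hypothetical sketch: copy a range between two files, driving
 * ->copy_file_range (xfs_file_copy_range above). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	int in = open("/mnt/xfs/src", O_RDONLY);                /* illustrative */
	int out = open("/mnt/xfs/dst", O_WRONLY | O_CREAT, 0644);
	loff_t off_in = 0, off_out = 0;
	ssize_t ret;

	if (in < 0 || out < 0)
		return 1;
	/* On XFS with reflink this can share extents instead of copying. */
	ret = copy_file_range(in, &off_in, out, &off_out, 65536, 0);
	close(in);
	close(out);
	return ret < 0;
}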
.llseek = xfs_file_llseek,
.read_iter = xfs_file_read_iter,
.write_iter = xfs_file_write_iter,
- .splice_read = xfs_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
+ .get_unmapped_area = thp_get_unmapped_area,
.fallocate = xfs_file_fallocate,
.copy_file_range = xfs_file_copy_range,
.clone_file_range = xfs_file_clone_range,
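
thp_get_unmapped_area() hands out PMD-aligned addresses so that suitably sized (notably DAX) mappings can use huge pages. A hypothetical sketch that maps a file and checks the alignment it received (path and the 2 MiB PMD size are assumptions):

/* Hypothetical sketch: mmap a file and report whether the address
 * chosen by ->get_unmapped_area is PMD (2 MiB) aligned. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/testfile", O_RDONLY); /* illustrative */
	size_t len = 2 * 1024 * 1024;                 /* assumed PMD size */
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	printf("mapped at %p, 2MiB-aligned: %d\n", p,
	       ((unsigned long)p % len) == 0);
	munmap(p, len);
	close(fd);
	return 0;
}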