/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * the application. (An illustrative userspace sketch of this access
 * pattern follows this header.)
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 */
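#if 0
/*
 * Illustrative userspace sketch (not part of the original file): a
 * minimal, hypothetical example of the access pattern described above.
 * The path, transfer size, and alignment are assumptions; the client
 * does not correct unaligned requests, so well-behaved applications
 * align both the buffer and the file offset.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int direct_write_example(void)
{
	void *buf = NULL;
	int fd;

	fd = open("/mnt/nfs/datafile", O_RDWR | O_DIRECT);
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096) != 0) {
		close(fd);
		return -1;
	}
	memset(buf, 0, 4096);
	/* Bypasses the client page cache; the call returns only after the
	 * requested bytes are on the server's permanent storage. */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		perror("pwrite");
	free(buf);
	return close(fd);
}
#endif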
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;
/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */

#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
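
/*
 * Summary of the direct write state machine (derived from
 * nfs_direct_write_release() and nfs_direct_write_complete() below):
 * writes normally go out UNSTABLE; if every reply comes back
 * NFS_FILE_SYNC the request completes immediately.  The first unstable
 * reply records the server's write verifier and sets
 * NFS_ODIRECT_DO_COMMIT so that a COMMIT is scheduled once all writes
 * finish.  A failed commit, or a verifier that no longer matches,
 * sets NFS_ODIRECT_RESCHED_WRITES, and the nfs_write_data requests
 * saved on rewrite_list are resent as stable (NFS_FILE_SYNC) writes.
 */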
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
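
/*
 * Note (added for clarity, not from the original source): io_count holds
 * one reference per RPC still in flight, plus one taken by the scheduling
 * path itself.  Each dispatcher calls get_dreq() before sending a request
 * and the matching rpc_release callback calls put_dreq(); whoever drops
 * the last reference (put_dreq() returns true) completes the whole
 * direct request via nfs_direct_complete().
 */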
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
	pages += (pgbase >> PAGE_SHIFT);
	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
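	/*
	 * Worked example (assuming 4K pages, added for illustration):
	 * pgbase = 0x1200 and count = 0x2000 advance pages by one above,
	 * leave pgbase & ~PAGE_MASK = 0x200, and give
	 * npages = (0x2000 + 0x200 + 0xfff) >> 12 = 3: the user buffer
	 * touches three pages starting at the second one.
	 */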
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (!PageCompound(page))
			set_page_dirty(page);

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);

static void nfs_direct_req_free(struct kref *kref)
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
	kref_put(&dreq->kref, nfs_direct_req_free);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */

	result = wait_for_completion_killable(&dreq->completion);

	result = dreq->error;
	result = dreq->count;

	return (ssize_t) result;
/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
		long res = (long) dreq->error;
		res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);

	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete. This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
	struct nfs_read_data *data = calldata;

	nfs_readpage_result(task, data);

static void nfs_direct_read_release(void *calldata)
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = data->task.tk_status;

	spin_lock(&dreq->lock);
	if (unlikely(status < 0)) {
		dreq->error = status;
		spin_unlock(&dreq->lock);
		dreq->count += data->res.count;
		spin_unlock(&dreq->lock);
		nfs_direct_dirty_pages(data->pagevec,

	nfs_direct_release_pages(data->pagevec, data->npages);

		nfs_direct_complete(dreq);
	nfs_readdata_free(data);

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_direct_read_release,
/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads. Read length accounting is
 * handled automatically by nfs_direct_read_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
						const struct iovec *iov,
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.callback_ops = &nfs_read_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,

		struct nfs_read_data *data;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(rsize, count);

		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
			nfs_readdata_free(data);
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_readdata_free(data);
			data->npages = result;

		data->req = (struct nfs_page *) dreq;
		data->cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.lock_context = dreq->l_ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		nfs_fattr_init(&data->fattr);
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		NFS_PROTO(inode)->read_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);

		dprintk("NFS: %5u initiated direct read call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				(long long)NFS_FILEID(inode),
				(unsigned long long)data->args.offset);

		/* FIXME: Remove this unnecessary math from final patch */
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
	} while (count != 0);

	return result < 0 ? (ssize_t) result : -EFAULT;
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;

		nfs_direct_complete(dreq);

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();

	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
	if (!is_sync_kiocb(iocb))

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
		result = nfs_direct_wait(dreq);

	nfs_direct_req_release(dreq);

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
	while (!list_empty(&dreq->rewrite_list)) {
		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_direct_release_pages(data->pagevec, data->npages);
		nfs_writedata_free(data);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
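/*
 * Note (added for clarity, not from the original source): NFSv3 and NFSv4
 * allow UNSTABLE writes that are flushed to permanent storage later with a
 * COMMIT; NFSv2 writes are always FILE_SYNC, so the commit and reschedule
 * machinery below is only built for v3/v4, and the simpler variants
 * further down are used otherwise.
 */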
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
	struct inode *inode = dreq->inode;
	struct nfs_write_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = dreq->ctx->cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.callback_ops = &nfs_write_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,

	list_for_each(p, &dreq->rewrite_list) {
		data = list_entry(p, struct nfs_write_data, pages);

		/* Use stable writes */
		data->args.stable = NFS_FILE_SYNC;

		nfs_fattr_init(&data->fattr);
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		/*
		 * Reuse data->task; data->args should not have changed
		 * since the original request was sent.
		 */
		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		/*
		 * We're called via an RPC callback, so BKL is already held.
		 */
		task = rpc_run_task(&task_setup_data);

		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				(long long)NFS_FILEID(inode),
				(unsigned long long)data->args.offset);

		nfs_direct_write_complete(dreq, inode);

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
	struct nfs_write_data *data = calldata;

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);

static void nfs_direct_commit_release(void *calldata)
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = data->task.tk_status;

		dprintk("NFS: %5u commit failed with error %d.\n",
				data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	nfs_direct_write_complete(dreq, data->inode);
	nfs_commit_free(data);

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_direct_commit_release,
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
	struct nfs_write_data *data = dreq->commit_data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = dreq->ctx->cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(dreq->inode),
		.callback_ops = &nfs_commit_direct_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,

	data->inode = dreq->inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = 0;
	data->args.count = 0;
	data->args.context = dreq->ctx;
	data->args.lock_context = dreq->l_ctx;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
	int flags = dreq->flags;

		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_zap_mapping(inode, inode->i_mapping);
			nfs_direct_complete(dreq);

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
	dreq->commit_data = nfs_commitdata_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
	dreq->commit_data = NULL;

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
	nfs_direct_free_writedata(dreq);
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
#endif
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
	struct nfs_write_data *data = calldata;

	nfs_writeback_done(task, data);

/*
 * NB: Return the value of the first error return code. Subsequent
 * errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = data->task.tk_status;

	spin_lock(&dreq->lock);

	if (unlikely(status < 0)) {
		/* An error has occurred, so we should not commit */
		dreq->error = status;
	if (unlikely(dreq->error != 0))

	dreq->count += data->res.count;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		case NFS_ODIRECT_DO_COMMIT:
			if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
				dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

	spin_unlock(&dreq->lock);

		nfs_direct_write_complete(dreq, data->inode);

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes. Write length accounting is
 * handled automatically by nfs_direct_write_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
						 const struct iovec *iov,
						 loff_t pos, int sync)
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.callback_ops = &nfs_write_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	size_t wsize = NFS_SERVER(inode)->wsize;

		struct nfs_write_data *data;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(wsize, count);

		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 0, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
			nfs_writedata_free(data);
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_writedata_free(data);
			data->npages = result;

		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->req = (struct nfs_page *) dreq;
		data->cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.lock_context = dreq->l_ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->args.stable = sync;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;
		nfs_fattr_init(&data->fattr);

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);

		dprintk("NFS: %5u initiated direct write call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				(long long)NFS_FILEID(inode),
				(unsigned long long)data->args.offset);

		/* FIXME: Remove this useless math from the final patch */
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
	} while (count != 0);

	return result < 0 ? (ssize_t) result : -EFAULT;
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, int sync)
	size_t requested_bytes = 0;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(dreq, vec,
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;

		nfs_direct_write_complete(dreq, dreq->inode);

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = NFS_UNSTABLE;

	dreq = nfs_direct_req_alloc();

	nfs_alloc_commit_data(dreq);

	if (dreq->commit_data == NULL || count <= wsize)
		sync = NFS_FILE_SYNC;
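	/*
	 * Note (added for clarity, not from the original source): a request
	 * that fits in a single wsize'd WRITE gains nothing from the
	 * UNSTABLE-plus-COMMIT path, and without commit_data no COMMIT can
	 * be sent at all, so in both cases the writes are issued
	 * NFS_FILE_SYNC from the start.
	 */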
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
	if (!is_sync_kiocb(iocb))

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
		result = nfs_direct_wait(dreq);

	nfs_direct_req_release(dreq);

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = nfs_sync_mapping(mapping);

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
		iocb->ki_pos = pos + retval;
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);

	if ((ssize_t) count < 0)

	retval = nfs_sync_mapping(mapping);

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
		iocb->ki_pos = pos + retval;
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
	if (nfs_direct_cachep == NULL)

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
	kmem_cache_destroy(nfs_direct_cachep);