NFS: Create a generic_pgio function
[cascardo/linux.git] fs/nfs/read.c
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_rw_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_rw_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

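/*
 * Used when nfs_page_length() says there is nothing to read (the page
 * lies entirely beyond the end of file): fill the page with zeroes
 * and mark it up to date.
 */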
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pageio_read_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
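/*
 * Typical call sequence for the read pageio machinery, sketched from
 * nfs_readpage_async() below (error handling omitted):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init_read(&pgio, inode, false,
 *			     &nfs_async_read_completion_ops);
 *	nfs_pageio_add_request(&pgio, req);
 *	nfs_pageio_complete(&pgio);
 *
 * nfs_pageio_reset_read_mds() below drops a pNFS-initialized
 * descriptor back to the plain MDS read path.
 */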

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_read_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Lu %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}

/*
 * Note: the completed I/O was page aligned, so 'bytes' below tracks
 * page boundaries exactly as it walks the request list.
 */
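/*
 * Worked example of the EOF zeroing below, assuming PAGE_SIZE == 4096
 * and a three-page read that hit EOF with hdr->good_bytes == 6144:
 *
 *	page 0: bytes = 0,    good_bytes - bytes = 6144, left intact
 *	page 1: bytes = 4096, good_bytes - bytes = 2048, zeroed from
 *		offset 2048 (6144 & ~PAGE_MASK) to the end of the page
 *	page 2: bytes = 8192 > good_bytes, zeroed completely
 *
 * All three pages are still marked up to date: the zeroes are valid
 * data beyond the end of file.
 */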
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				SetPageUptodate(page);
		} else
			SetPageUptodate(page);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

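/*
 * Set up and launch the RPC task for a read. The task runs
 * asynchronously: the local reference is dropped as soon as the task
 * has been started, and completion is reported through @call_ops.
 */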
int nfs_initiate_read(struct rpc_clnt *clnt,
		      struct nfs_pgio_data *data,
		      const struct rpc_call_ops *call_ops, int flags)
{
	struct inode *inode = data->header->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags | flags,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%llu, %u bytes @ "
			"offset %llu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			data->args.count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

static int nfs_do_read(struct nfs_pgio_data *data,
		const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->header->inode;

	return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}

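/*
 * Submit every nfs_pgio_data queued on @head. Submission continues
 * past individual failures; the first error encountered, if any, is
 * what gets returned.
 */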
static int
nfs_do_multiple_reads(struct list_head *head,
		const struct rpc_call_ops *call_ops)
{
	struct nfs_pgio_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_first_entry(head, struct nfs_pgio_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_read(data, call_ops);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

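/*
 * Turn the requests queued on @desc into read RPCs via the generic
 * nfs_generic_pgio() helper. The extra reference on the header keeps
 * the completion callback from running until both this function and
 * every RPC it launched have dropped their references.
 */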
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_rw_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_rw_header_alloc(desc->pg_rw_ops);
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_do_multiple_reads(&hdr->rpc_list,
					    desc->pg_rpc_callops);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task, struct nfs_pgio_data *data,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

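/*
 * Example of the short-read fixup below: a 16384-byte READ at offset
 * 0 for which the server returned only 8192 bytes is restarted as an
 * 8192-byte READ at offset 8192, with the page base advanced to
 * match. A reply that made no progress at all becomes -EIO.
 */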
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_args *argp = &data->args;
	struct nfs_pgio_res  *resp = &data->res;

	/* This is a short read! */
	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(data->header, -EIO, argp->offset);
		return;
	}
	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

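/*
 * On EOF, trim hdr->good_bytes so that pages beyond the end of file
 * are zeroed (and not flagged as errors) by nfs_read_completion(); a
 * short read that did not hit EOF is retried instead.
 */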
static void nfs_readpage_result(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (data->res.eof) {
		loff_t bound;

		bound = data->args.offset + data->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (data->res.count != data->args.count)
		nfs_readpage_retry(task, data);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

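/*
 * Per-page callback handed to read_cache_pages() by nfs_readpages():
 * wrap each page in an nfs_page request and queue it on the pageio
 * descriptor.
 */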
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_rw_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

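/*
 * Read-side hooks for the generic nfs_rw/pgio machinery: header
 * allocation and the protocol-independent done/result callbacks
 * defined above.
 */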
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
};