/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

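/*
 * Minimum number of entries kept preallocated in the write and commit
 * mempools, so that writeback can always make some forward progress
 * even when the allocator is under memory pressure.
 */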
#define MIN_POOL_WRITE          (32)
#define MIN_POOL_COMMIT         (4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
                                                struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

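/*
 * These allocations happen on the writeback path, hence GFP_NOIO:
 * satisfying them must never recurse back into filesystem I/O, or we
 * could deadlock trying to flush the very pages we are writing out.
 */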
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
        struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
        }
        return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
        mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
        struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

        if (p)
                memset(p, 0, sizeof(*p));
        return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
        mempool_free(hdr, nfs_wdata_mempool);
}

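/*
 * The smp_wmb() orders the store of ctx->error before the setting of
 * NFS_CONTEXT_ERROR_WRITE: any reader that sees the flag set is then
 * guaranteed to observe the error value that caused it.
 */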
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
        ctx->error = error;
        smp_wmb();
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

/*
 * nfs_page_find_head_request_locked - find head request associated with @page
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page))
                req = (struct nfs_page *)page_private(page);
        else if (unlikely(PageSwapCache(page)))
                req = nfs_page_search_commits_for_head_request_locked(nfsi,
                        page);

        if (req) {
                WARN_ON_ONCE(req->wb_head != req);
                kref_get(&req->wb_kref);
        }

        return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req = NULL;

        spin_lock(&inode->i_lock);
        req = nfs_page_find_head_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page_file_mapping(page)->host;
        loff_t end, i_size;
        pgoff_t end_index;

        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (i_size > 0 && page_file_index(page) < end_index)
                goto out;
        end = page_file_offset(page) + ((loff_t)offset+count);
        if (i_size >= end)
                goto out;
        i_size_write(inode, end);
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
        spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
        nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
        struct nfs_page *req;

        WARN_ON_ONCE(head != head->wb_head);
        WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));

        req = head;
        do {
                if (page_offset >= req->wb_pgbase &&
                    page_offset < (req->wb_pgbase + req->wb_bytes))
                        return req;

                req = req->wb_this_page;
        } while (req != head);

        return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Returns true if the page group with head @head covers the whole page,
 * false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
        struct nfs_page *tmp;
        unsigned int pos = 0;
        unsigned int len = nfs_page_length(req->wb_page);

        nfs_page_group_lock(req, false);

        do {
                tmp = nfs_page_group_search_locked(req->wb_head, pos);
                if (tmp) {
                        /* no way this should happen */
                        WARN_ON_ONCE(tmp->wb_pgbase != pos);
                        pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
                }
        } while (tmp && pos < len);

        nfs_page_group_unlock(req);
        WARN_ON_ONCE(pos > len);
        return pos == len;
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
        if (PageUptodate(req->wb_page))
                return;
        if (!nfs_page_group_covers_page(req))
                return;
        SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
        if (wbc->for_reclaim)
                return FLUSH_HIGHPRI | FLUSH_STABLE;
        if (wbc->for_kupdate || wbc->for_background)
                return FLUSH_LOWPRI | FLUSH_COND_STABLE;
        return FLUSH_COND_STABLE;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

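/*
 * nfs_congestion_kb is in kilobytes; the shift by (PAGE_SHIFT - 10)
 * converts it to a page count. The "off" threshold sits at three
 * quarters of the "on" threshold, giving the congestion flag some
 * hysteresis so it does not flap on every page completion.
 */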
#define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH       \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_set_page_writeback(struct page *page)
{
        struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
        int ret = test_set_page_writeback(page);

        WARN_ON_ONCE(ret != 0);

        if (atomic_long_inc_return(&nfss->writeback) >
                        NFS_CONGESTION_ON_THRESH) {
                set_bdi_congested(&nfss->backing_dev_info,
                                        BLK_RW_ASYNC);
        }
}

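/*
 * end_page_writeback() must run exactly once per page, even though the
 * page may be covered by several subrequests. nfs_page_group_sync_on_bit()
 * only returns true once every request in the group has set PG_WB_END,
 * so the writeback completion and congestion accounting below happen
 * for the last completing request only.
 */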
static void nfs_end_page_writeback(struct nfs_page *req)
{
        struct inode *inode = page_file_mapping(req->wb_page)->host;
        struct nfs_server *nfss = NFS_SERVER(inode);

        if (!nfs_page_group_sync_on_bit(req, PG_WB_END))
                return;

        end_page_writeback(req->wb_page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}


/* nfs_page_group_clear_bits
 *   @req - an nfs request
 * clears all page group related bits from @req
 */
static void
nfs_page_group_clear_bits(struct nfs_page *req)
{
        clear_bit(PG_TEARDOWN, &req->wb_flags);
        clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
        clear_bit(PG_UPTODATE, &req->wb_flags);
        clear_bit(PG_WB_END, &req->wb_flags);
        clear_bit(PG_REMOVE, &req->wb_flags);
}


/*
 * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group, must be holding inode lock
 * @head  - head request of page group, must be holding head lock
 * @req   - request that couldn't lock and needs to wait on the req bit lock
 * @nonblock - if true, don't actually wait
 *
 * NOTE: this must be called holding page_group bit lock and inode spin lock
 *       and BOTH will be released before returning.
 *
 * returns 0 on success, < 0 on error.
 */
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
                          struct nfs_page *req, bool nonblock)
        __releases(&inode->i_lock)
{
        struct nfs_page *tmp;
        int ret;

        /* relinquish all the locks successfully grabbed this run */
        for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
                nfs_unlock_request(tmp);

        WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));

        /* grab a ref on the request that will be waited on */
        kref_get(&req->wb_kref);

        nfs_page_group_unlock(head);
        spin_unlock(&inode->i_lock);

        /* release ref from nfs_page_find_head_request_locked */
        nfs_release_request(head);

        if (!nonblock)
                ret = nfs_wait_on_request(req);
        else
                ret = -EAGAIN;
        nfs_release_request(req);

        return ret;
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
                                 struct nfs_page *old_head)
{
        while (destroy_list) {
                struct nfs_page *subreq = destroy_list;

                destroy_list = (subreq->wb_this_page == old_head) ?
                                   NULL : subreq->wb_this_page;

                WARN_ON_ONCE(old_head != subreq->wb_head);

                /* make sure old group is not used */
                subreq->wb_head = subreq;
                subreq->wb_this_page = subreq;

                /* subreq is now totally disconnected from page group or any
                 * write / commit lists. last chance to wake any waiters */
                nfs_unlock_request(subreq);

                if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
                        /* release ref on old head request */
                        nfs_release_request(old_head);

                        nfs_page_group_clear_bits(subreq);

                        /* release the PG_INODE_REF reference */
                        if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
                                nfs_release_request(subreq);
                        else
                                WARN_ON_ONCE(1);
                } else {
                        WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
                        /* zombie requests have already released the last
                         * reference and were waiting on the rest of the
                         * group to complete. Since it's no longer part of a
                         * group, simply free the request */
                        nfs_page_group_clear_bits(subreq);
                        nfs_free_request(subreq);
                }
        }
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 * @nonblock - if true, don't block waiting for request locks
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page, bool nonblock)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *head, *subreq;
        struct nfs_page *destroy_list = NULL;
        unsigned int total_bytes;
        int ret;

try_again:
        total_bytes = 0;

        WARN_ON_ONCE(destroy_list);

        spin_lock(&inode->i_lock);

        /*
         * A reference is taken only on the head request which acts as a
         * reference to the whole page group - the group will not be destroyed
         * until the head reference is released.
         */
        head = nfs_page_find_head_request_locked(NFS_I(inode), page);

        if (!head) {
                spin_unlock(&inode->i_lock);
                return NULL;
        }

        /* holding inode lock, so always make a non-blocking call to try the
         * page group lock */
        ret = nfs_page_group_lock(head, true);
        if (ret < 0) {
                spin_unlock(&inode->i_lock);

                if (!nonblock && ret == -EAGAIN) {
                        nfs_page_group_lock_wait(head);
                        nfs_release_request(head);
                        goto try_again;
                }

                nfs_release_request(head);
                return ERR_PTR(ret);
        }

        /* lock each request in the page group */
        subreq = head;
        do {
                /*
                 * Subrequests are always contiguous, non overlapping
                 * and in order. If not, it's a programming error.
                 */
                WARN_ON_ONCE(subreq->wb_offset !=
                     (head->wb_offset + total_bytes));

                /* keep track of how many bytes this group covers */
                total_bytes += subreq->wb_bytes;

                if (!nfs_lock_request(subreq)) {
                        /* releases page group bit lock and
                         * inode spin lock and all references */
                        ret = nfs_unroll_locks_and_wait(inode, head,
                                subreq, nonblock);

                        if (ret == 0)
                                goto try_again;

                        return ERR_PTR(ret);
                }

                subreq = subreq->wb_this_page;
        } while (subreq != head);

        /* Now that all requests are locked, make sure they aren't on any list.
         * Commit list removal accounting is done after locks are dropped */
        subreq = head;
        do {
                nfs_clear_request_commit(subreq);
                subreq = subreq->wb_this_page;
        } while (subreq != head);

        /* unlink subrequests from head, destroy them later */
        if (head->wb_this_page != head) {
                /* destroy list will be terminated by head */
                destroy_list = head->wb_this_page;
                head->wb_this_page = head;

                /* change head request to cover whole range that
                 * the former page group covered */
                head->wb_bytes = total_bytes;
        }

        /*
         * prepare head request to be added to new pgio descriptor
         */
        nfs_page_group_clear_bits(head);

        /*
         * some part of the group was still on the inode list - otherwise
         * the group wouldn't be involved in async write.
         * grab a reference for the head request, iff it needs one.
         */
        if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
                kref_get(&head->wb_kref);

        nfs_page_group_unlock(head);

        /* drop lock to clean up requests on destroy list */
        spin_unlock(&inode->i_lock);

        nfs_destroy_unlinked_subrequests(destroy_list, head);

        /* still holds ref on head from nfs_page_find_head_request_locked
         * and still has lock on head from lock loop */
        return head;
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page, bool nonblock)
{
        struct nfs_page *req;
        int ret = 0;

        req = nfs_lock_and_join_requests(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        nfs_set_page_writeback(page);
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

        ret = 0;
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
        }
out:
        return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
        struct inode *inode = page_file_mapping(page)->host;
        int ret;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
                ret = 0;
        }
        return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        unsigned long *bitlock = &NFS_I(inode)->flags;
        struct nfs_pageio_descriptor pgio;
        int err;

        /* Stop dirtying of new pages while we sync */
        err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
                        nfs_wait_bit_killable, TASK_KILLABLE);
        if (err)
                goto out_err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
                                &nfs_async_write_completion_ops);
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);

        clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
        smp_mb__after_atomic();
        wake_up_bit(bitlock, NFS_INO_FLUSHING);

        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
        if (err < 0)
                goto out_err;
        return 0;
out_err:
        return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        WARN_ON_ONCE(req->wb_this_page != req);

        /* Lock the request! */
        nfs_lock_request(req);

        spin_lock(&inode->i_lock);
        if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                inode->i_version++;
        /*
         * Swap-space should not get truncated. Hence no need to plug the race
         * with invalidate/truncate.
         */
        if (likely(!PageSwapCache(req->wb_page))) {
                set_bit(PG_MAPPED, &req->wb_flags);
                SetPagePrivate(req->wb_page);
                set_page_private(req->wb_page, (unsigned long)req);
        }
        nfsi->npages++;
        /* this is a head request for a page group - mark it as having an
         * extra reference so sub groups can follow suit */
        WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *head;

        if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
                head = req->wb_head;

                spin_lock(&inode->i_lock);
                if (likely(!PageSwapCache(head->wb_page))) {
                        set_page_private(head->wb_page, 0);
                        ClearPagePrivate(head->wb_page);
                        clear_bit(PG_MAPPED, &head->wb_flags);
                }
                nfsi->npages--;
                spin_unlock(&inode->i_lock);
        }

        if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
                nfs_release_request(req);
        else
                WARN_ON_ONCE(1);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

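/*
 * COMMIT exists only in NFSv3 and later: NFSv2 writes are always
 * NFS_FILE_SYNC, so for v2-only builds the commit machinery below is
 * replaced by the no-op stubs in the #else branch.
 */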
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding the inode lock (which also serves as the
 * cinfo lock).
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
                                                struct page *page)
{
        struct nfs_page *freq, *t;
        struct nfs_commit_info cinfo;
        struct inode *inode = &nfsi->vfs_inode;

        nfs_init_cinfo_from_inode(&cinfo, inode);

        /* search through pnfs commit lists */
        freq = pnfs_search_commit_reqs(inode, &cinfo, page);
        if (freq)
                return freq->wb_head;

        /* Linearly search the commit list for the correct request */
        list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
                if (freq->wb_page == page)
                        return freq->wb_head;
        }

        return NULL;
}

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * outstanding requests requiring a commit, as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
                            struct nfs_commit_info *cinfo)
{
        set_bit(PG_CLEAN, &(req)->wb_flags);
        spin_lock(cinfo->lock);
        nfs_list_add_request(req, dst);
        cinfo->mds->ncommit++;
        spin_unlock(cinfo->lock);
        if (!cinfo->dreq) {
                inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
                             BDI_RECLAIMABLE);
                __mark_inode_dirty(req->wb_context->dentry->d_inode,
                                   I_DIRTY_DATASYNC);
        }
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
                               struct nfs_commit_info *cinfo)
{
        if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
                return;
        nfs_list_remove_request(req);
        cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode)
{
        cinfo->lock = &inode->i_lock;
        cinfo->mds = &NFS_I(inode)->commit_info;
        cinfo->ds = pnfs_get_ds_info(inode);
        cinfo->dreq = NULL;
        cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
                    struct inode *inode,
                    struct nfs_direct_req *dreq)
{
        if (dreq)
                nfs_init_cinfo_from_dreq(cinfo, dreq);
        else
                nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
                        struct nfs_commit_info *cinfo)
{
        if (pnfs_mark_request_commit(req, lseg, cinfo))
                return;
        nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
        dec_zone_page_state(page, NR_UNSTABLE_NFS);
        dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
}

/* Called holding the inode lock (i.e. the cinfo lock) */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
        if (test_bit(PG_CLEAN, &req->wb_flags)) {
                struct inode *inode = req->wb_context->dentry->d_inode;
                struct nfs_commit_info cinfo;

                nfs_init_cinfo_from_inode(&cinfo, inode);
                if (!pnfs_clear_request_commit(req, &cinfo)) {
                        nfs_request_remove_commit_list(req, &cinfo);
                }
                nfs_clear_page_commit(req->wb_page);
        }
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
        if (hdr->verf.committed == NFS_DATA_SYNC)
                return hdr->lseg == NULL;
        return hdr->verf.committed != NFS_FILE_SYNC;
}

#else
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
                                                struct page *page)
{
        return NULL;
}

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode)
{
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
                    struct inode *inode,
                    struct nfs_direct_req *dreq)
{
}

void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
                        struct nfs_commit_info *cinfo)
{
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
        return 0;
}

#endif

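/*
 * Runs once all WRITE RPCs for this header have completed: walk the
 * request list, flag any request falling past hdr->good_bytes as an
 * error, queue requests whose data the server has not yet committed to
 * stable storage onto a commit list, and tear the rest down.
 */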
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);

                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
                    (hdr->good_bytes < bytes)) {
                        nfs_set_pageerror(req->wb_page);
                        nfs_context_set_write_error(req->wb_context, hdr->error);
                        goto remove_req;
                }
                if (nfs_write_need_commit(hdr)) {
                        memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                        goto next;
                }
remove_req:
                nfs_inode_remove_request(req);
next:
                nfs_unlock_request(req);
                nfs_end_page_writeback(req);
                nfs_release_request(req);
        }
out:
        hdr->release(hdr);
}

#if  IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
        return cinfo->mds->ncommit;
}

/* cinfo->lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
                     struct nfs_commit_info *cinfo, int max)
{
        struct nfs_page *req, *tmp;
        int ret = 0;

        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
                kref_get(&req->wb_kref);
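                /*
                 * cond_resched_lock() may drop and re-take cinfo->lock;
                 * if it did, the cached next pointer "tmp" may be stale,
                 * so recompute it from the (still locked) request before
                 * continuing the walk.
                 */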
                if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
                nfs_request_remove_commit_list(req, cinfo);
                nfs_list_add_request(req, dst);
                ret++;
                if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
                struct nfs_commit_info *cinfo)
{
        int ret = 0;

        spin_lock(cinfo->lock);
        if (cinfo->mds->ncommit > 0) {
                const int max = INT_MAX;

                ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
                                           cinfo, max);
                ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
        }
        spin_unlock(cinfo->lock);
        return ret;
}

#else
unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
        return 0;
}

int nfs_scan_commit(struct inode *inode, struct list_head *dst,
                    struct nfs_commit_info *cinfo)
{
        return 0;
}
#endif

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                struct page *page,
                unsigned int offset,
                unsigned int bytes)
{
        struct nfs_page *req;
        unsigned int rqend;
        unsigned int end;
        int error;

        if (!PagePrivate(page))
                return NULL;

        end = offset + bytes;
        spin_lock(&inode->i_lock);

        for (;;) {
                req = nfs_page_find_head_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;

                /* should be handled by nfs_flush_incompatible */
                WARN_ON_ONCE(req->wb_head != req);
                WARN_ON_ONCE(req->wb_this_page != req);

                rqend = req->wb_offset + req->wb_bytes;
                /*
                 * Tell the caller to flush out the request if
                 * the offsets are non-contiguous.
                 * Note: nfs_flush_incompatible() will already
                 * have flushed out requests having wrong owners.
                 */
                if (offset > rqend
                    || end < req->wb_offset)
                        goto out_flushme;

                if (nfs_lock_request(req))
                        break;

                /* The request is locked, so wait and then retry */
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (error != 0)
                        goto out_err;
                spin_lock(&inode->i_lock);
        }

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
        }
        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;
        else
                req->wb_bytes = rqend - req->wb_offset;
out_unlock:
        if (req)
                nfs_clear_request_commit(req);
        spin_unlock(&inode->i_lock);
        return req;
out_flushme:
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
        error = nfs_wb_page(inode, page);
out_err:
        return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req;

        req = nfs_try_to_update_request(inode, page, offset, bytes);
        if (req != NULL)
                goto out;
        req = nfs_create_request(ctx, page, NULL, offset, bytes);
        if (IS_ERR(req))
                goto out;
        nfs_inode_add_request(inode, req);
out:
        return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(req);
        nfs_mark_request_dirty(req);
        nfs_unlock_and_release_request(req);
        return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_lock_context *l_ctx;
        struct nfs_page *req;
        int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_head_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
                do_flush = req->wb_page != page || req->wb_context != ctx;
                /* for now, flush if more than 1 request in page_group */
                do_flush |= req->wb_this_page != req;
                if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
                        do_flush |= l_ctx->lockowner.l_owner != current->files
                                || l_ctx->lockowner.l_pid != current->tgid;
                }
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page_file_mapping(page)->host, page);
        } while (status == 0);
        return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
        struct nfs_open_context *ctx = nfs_file_open_context(filp);
        struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

        return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
{
        return rpcauth_cred_key_to_expire(ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        if (nfs_have_delegated_attributes(inode))
                goto out;
        if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                return false;
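        /*
         * The read barrier orders the cache_validity read above against
         * the NFS_INO_INVALIDATING test below; it pairs with a write
         * barrier on the mapping-invalidation side, so a racing
         * invalidation cannot be missed by both checks.
         */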
        smp_rmb();
        if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
                return false;
out:
        if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
                return false;
        return PageUptodate(page) != 0;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
        if (file->f_flags & O_DSYNC)
                return 0;
        if (!nfs_write_pageuptodate(page, inode))
                return 0;
        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                return 1;
        if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
                        inode->i_flock->fl_end == OFFSET_MAX &&
                        inode->i_flock->fl_type != F_RDLCK))
                return 1;
        return 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode    *inode = page_file_mapping(page)->host;
        int             status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
                file, count, (long long)(page_file_offset(page) + offset));

        if (nfs_can_extend_write(file, page, inode)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
        else
                __set_page_dirty_nobuffers(page);

        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
        return status;
}

static int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
                case FLUSH_HIGHPRI:
                        return RPC_PRIORITY_HIGH;
                case FLUSH_LOWPRI:
                        return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
                               struct rpc_message *msg,
                               struct rpc_task_setup *task_setup_data, int how)
{
        struct inode *inode = hdr->inode;
        int priority = flush_task_priority(how);

        task_setup_data->priority = priority;
        NFS_PROTO(inode)->write_setup(hdr, msg);

        nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
                                 &task_setup_data->rpc_client, msg, hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on the next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
        nfs_mark_request_dirty(req);
        nfs_unlock_request(req);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_redirty_request(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
        .error_cleanup = nfs_async_write_error,
        .completion = nfs_write_completion,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                               struct inode *inode, int ioflags, bool force_mds,
                               const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
                        server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

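/*
 * Fall back from a pNFS layout driver to I/O through the metadata
 * server: restore the generic rw ops and the MDS wsize on an already
 * initialised descriptor so that the pages are resent the ordinary way.
 */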
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pgio_rw_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_commit_data *data = calldata;

        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

static void nfs_writeback_release_common(struct nfs_pgio_header *hdr)
{
        /* do nothing! */
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
        umode_t mode = inode->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone.  If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && S_ISREG(mode)))
                return kill;

        return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
                              struct nfs_pgio_header *hdr,
                              struct inode *inode)
{
        int status;

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients.  A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(inode)->write_done(task, hdr);
        if (status != 0)
                return status;
        nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
        if (hdr->res.verf->committed < hdr->args.stable &&
            task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long    complain;

                /* Note this will print the MDS for a DS write */
                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                NFS_SERVER(inode)->nfs_client->cl_hostname,
                                hdr->res.verf->committed, hdr->args.stable);
                        complain = jiffies + 300 * HZ;
                }
        }
#endif

        /* Deal with the suid/sgid bit corner case */
        if (nfs_should_remove_suid(inode))
                nfs_mark_for_revalidate(inode);
        return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
                                 struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args    *argp = &hdr->args;
        struct nfs_pgio_res     *resp = &hdr->res;

        if (resp->count < argp->count) {
                static unsigned long    complain;

                /* This is a short write! */
1450                 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1451
1452                 /* Has the server at least made some progress? */
1453                 if (resp->count == 0) {
1454                         if (time_before(complain, jiffies)) {
1455                                 printk(KERN_WARNING
1456                                        "NFS: Server wrote zero bytes, expected %u.\n",
1457                                        argp->count);
1458                                 complain = jiffies + 300 * HZ;
1459                         }
1460                         nfs_set_pgio_error(hdr, -EIO, argp->offset);
1461                         task->tk_status = -EIO;
1462                         return;
1463                 }
1464                 /* Was this an NFSv2 write or an NFSv3 stable write? */
1465                 if (resp->verf->committed != NFS_UNSTABLE) {
1466                         /* Resend from where the server left off */
1467                         hdr->mds_offset += resp->count;
1468                         argp->offset += resp->count;
1469                         argp->pgbase += resp->count;
1470                         argp->count -= resp->count;
1471                 } else {
1472                         /* Resend as a stable write in order to avoid
1473                          * headaches in the case of a server crash.
1474                          */
1475                         argp->stable = NFS_FILE_SYNC;
1476                 }
1477                 rpc_restart_call_prepare(task);
1478         }
1479 }
1480
1481
1482 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
1483 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1484 {
1485         int ret;
1486
1487         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1488                 return 1;
1489         if (!may_wait)
1490                 return 0;
1491         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1492                                 NFS_INO_COMMIT,
1493                                 nfs_wait_bit_killable,
1494                                 TASK_KILLABLE);
1495         return (ret < 0) ? ret : 1;
1496 }
1497
1498 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1499 {
1500         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1501         smp_mb__after_atomic();
1502         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1503 }
1504
1505 void nfs_commitdata_release(struct nfs_commit_data *data)
1506 {
1507         put_nfs_open_context(data->context);
1508         nfs_commit_free(data);
1509 }
1510 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1511
1512 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1513                         const struct rpc_call_ops *call_ops,
1514                         int how, int flags)
1515 {
1516         struct rpc_task *task;
1517         int priority = flush_task_priority(how);
1518         struct rpc_message msg = {
1519                 .rpc_argp = &data->args,
1520                 .rpc_resp = &data->res,
1521                 .rpc_cred = data->cred,
1522         };
1523         struct rpc_task_setup task_setup_data = {
1524                 .task = &data->task,
1525                 .rpc_client = clnt,
1526                 .rpc_message = &msg,
1527                 .callback_ops = call_ops,
1528                 .callback_data = data,
1529                 .workqueue = nfsiod_workqueue,
1530                 .flags = RPC_TASK_ASYNC | flags,
1531                 .priority = priority,
1532         };
1533         /* Set up the initial task struct.  */
1534         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1535
1536         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1537
1538         nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
1539                 NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
1540
1541         task = rpc_run_task(&task_setup_data);
1542         if (IS_ERR(task))
1543                 return PTR_ERR(task);
1544         if (how & FLUSH_SYNC)
1545                 rpc_wait_for_completion_task(task);
1546         rpc_put_task(task);
1547         return 0;
1548 }
1549 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1550
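/*
 * Compute the "last write byte" for a list of requests: one byte past
 * the highest offset covered by any request on @head. Only pNFS
 * commits need this (see nfs_init_commit() below).
 */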
1551 static loff_t nfs_get_lwb(struct list_head *head)
1552 {
1553         loff_t lwb = 0;
1554         struct nfs_page *req;
1555
1556         list_for_each_entry(req, head, wb_list)
1557                 if (lwb < (req_offset(req) + req->wb_bytes))
1558                         lwb = req_offset(req) + req->wb_bytes;
1559
1560         return lwb;
1561 }
1562
1563 /*
1564  * Set up the argument/result storage required for the RPC call.
1565  */
1566 void nfs_init_commit(struct nfs_commit_data *data,
1567                      struct list_head *head,
1568                      struct pnfs_layout_segment *lseg,
1569                      struct nfs_commit_info *cinfo)
1570 {
1571         struct nfs_page *first = nfs_list_entry(head->next);
1572         struct inode *inode = first->wb_context->dentry->d_inode;
1573
1574         /* Set up the RPC argument and reply structs
1575          * NB: take care not to mess about with data->commit et al. */
1576
1577         list_splice_init(head, &data->pages);
1578
1579         data->inode       = inode;
1580         data->cred        = first->wb_context->cred;
1581         data->lseg        = lseg; /* reference transferred */
1582         /* only set lwb for pnfs commit */
1583         if (lseg)
1584                 data->lwb = nfs_get_lwb(&data->pages);
1585         data->mds_ops     = &nfs_commit_ops;
1586         data->completion_ops = cinfo->completion_ops;
1587         data->dreq        = cinfo->dreq;
1588
1589         data->args.fh     = NFS_FH(data->inode);
1590         /* Note: we always commit the entire inode - a zero offset and count means "everything" on the wire */
1591         data->args.offset = 0;
1592         data->args.count  = 0;
1593         data->context     = get_nfs_open_context(first->wb_context);
1594         data->res.fattr   = &data->fattr;
1595         data->res.verf    = &data->verf;
1596         nfs_fattr_init(&data->fattr);
1597 }
1598 EXPORT_SYMBOL_GPL(nfs_init_commit);
1599
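/*
 * Put requests back on the commit list after a COMMIT could not be
 * sent. nfs_mark_request_commit() re-adds each request and bumps the
 * unstable-page counters, so the decrements below keep the
 * NR_UNSTABLE_NFS / BDI_RECLAIMABLE accounting balanced.
 */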
1600 void nfs_retry_commit(struct list_head *page_list,
1601                       struct pnfs_layout_segment *lseg,
1602                       struct nfs_commit_info *cinfo)
1603 {
1604         struct nfs_page *req;
1605
1606         while (!list_empty(page_list)) {
1607                 req = nfs_list_entry(page_list->next);
1608                 nfs_list_remove_request(req);
1609                 nfs_mark_request_commit(req, lseg, cinfo);
1610                 if (!cinfo->dreq) {
1611                         dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1612                         dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
1613                                      BDI_RECLAIMABLE);
1614                 }
1615                 nfs_unlock_and_release_request(req);
1616         }
1617 }
1618 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1619
1620 /*
1621  * Commit dirty pages
1622  */
1623 static int
1624 nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1625                 struct nfs_commit_info *cinfo)
1626 {
1627         struct nfs_commit_data  *data;
1628
1629         data = nfs_commitdata_alloc();
1630
1631         if (!data)
1632                 goto out_bad;
1633
1634         /* Set up the argument struct */
1635         nfs_init_commit(data, head, NULL, cinfo);
1636         atomic_inc(&cinfo->mds->rpcs_out);
1637         return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
1638                                    how, 0);
1639  out_bad:
1640         nfs_retry_commit(head, NULL, cinfo);
1641         cinfo->completion_ops->error_cleanup(NFS_I(inode));
1642         return -ENOMEM;
1643 }
1644
1645 /*
1646  * COMMIT call returned
1647  */
1648 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1649 {
1650         struct nfs_commit_data  *data = calldata;
1651
1652         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1653                                 task->tk_pid, task->tk_status);
1654
1655         /* Call the NFS version-specific code */
1656         NFS_PROTO(data->inode)->commit_done(task, data);
1657 }
1658
1659 static void nfs_commit_release_pages(struct nfs_commit_data *data)
1660 {
1661         struct nfs_page *req;
1662         int status = data->task.tk_status;
1663         struct nfs_commit_info cinfo;
1664
1665         while (!list_empty(&data->pages)) {
1666                 req = nfs_list_entry(data->pages.next);
1667                 nfs_list_remove_request(req);
1668                 nfs_clear_page_commit(req->wb_page);
1669
1670                 dprintk("NFS:       commit (%s/%llu %d@%lld)",
1671                         req->wb_context->dentry->d_sb->s_id,
1672                         (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1673                         req->wb_bytes,
1674                         (long long)req_offset(req));
1675                 if (status < 0) {
1676                         nfs_context_set_write_error(req->wb_context, status);
1677                         nfs_inode_remove_request(req);
1678                         dprintk(", error = %d\n", status);
1679                         goto next;
1680                 }
1681
1682                 /* The COMMIT RPC succeeded. Check the verifier the
1683                  * server returned against each request's stored verf. */
1684                 if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
1685                         /* We have a match */
1686                         nfs_inode_remove_request(req);
1687                         dprintk(" OK\n");
1688                         goto next;
1689                 }
1690                 /* Mismatch: the server probably rebooted; write the page again */
1691                 dprintk(" mismatch\n");
1692                 nfs_mark_request_dirty(req);
1693                 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1694         next:
1695                 nfs_unlock_and_release_request(req);
1696         }
1697         nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1698         if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
1699                 nfs_commit_clear_lock(NFS_I(data->inode));
1700 }
1701
1702 static void nfs_commit_release(void *calldata)
1703 {
1704         struct nfs_commit_data *data = calldata;
1705
1706         data->completion_ops->completion(data);
1707         nfs_commitdata_release(calldata);
1708 }
1709
1710 static const struct rpc_call_ops nfs_commit_ops = {
1711         .rpc_call_prepare = nfs_commit_prepare,
1712         .rpc_call_done = nfs_commit_done,
1713         .rpc_release = nfs_commit_release,
1714 };
1715
1716 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1717         .completion = nfs_commit_release_pages,
1718         .error_cleanup = nfs_commit_clear_lock,
1719 };
1720
1721 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1722                             int how, struct nfs_commit_info *cinfo)
1723 {
1724         int status;
1725
1726         status = pnfs_commit_list(inode, head, how, cinfo);
1727         if (status == PNFS_NOT_ATTEMPTED)
1728                 status = nfs_commit_list(inode, head, how, cinfo);
1729         return status;
1730 }
1731
1732 int nfs_commit_inode(struct inode *inode, int how)
1733 {
1734         LIST_HEAD(head);
1735         struct nfs_commit_info cinfo;
1736         int may_wait = how & FLUSH_SYNC;
1737         int res;
1738
1739         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1740         if (res <= 0)
1741                 goto out_mark_dirty;
1742         nfs_init_cinfo_from_inode(&cinfo, inode);
1743         res = nfs_scan_commit(inode, &head, &cinfo);
1744         if (res) {
1745                 int error;
1746
1747                 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1748                 if (error < 0)
1749                         return error;
1750                 if (!may_wait)
1751                         goto out_mark_dirty;
1752                 error = wait_on_bit_action(&NFS_I(inode)->flags,
1753                                 NFS_INO_COMMIT,
1754                                 nfs_wait_bit_killable,
1755                                 TASK_KILLABLE);
1756                 if (error < 0)
1757                         return error;
1758         } else
1759                 nfs_commit_clear_lock(NFS_I(inode));
1760         return res;
1761         /* Note: If we exit without ensuring that the commit is complete,
1762          * we must mark the inode as dirty. Otherwise, future calls to
1763          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1764          * that the data is on the disk.
1765          */
1766 out_mark_dirty:
1767         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1768         return res;
1769 }
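/*
 * Illustrative call site (a sketch; the real callers are the flush
 * and writeback paths in this file): force a blocking commit of all
 * unstable data on an inode.
 *
 *	int err = nfs_commit_inode(inode, FLUSH_SYNC);
 *	if (err < 0)
 *		...;	/* commit failed or the wait was interrupted */
 *	/* err >= 0: number of requests that were scheduled for commit */
 */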
1770
1771 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1772 {
1773         struct nfs_inode *nfsi = NFS_I(inode);
1774         int flags = FLUSH_SYNC;
1775         int ret = 0;
1776
1777         /* no commits means nothing needs to be done */
1778         if (!nfsi->commit_info.ncommit)
1779                 return ret;
1780
1781         if (wbc->sync_mode == WB_SYNC_NONE) {
1782                 /* Don't commit yet if this is a non-blocking flush and there
1783                  * are a lot of outstanding writes for this mapping.
1784                  */
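                /* e.g. with npages == 1024 and ncommit == 400, this
                 * non-blocking flush bails out here (400 <= 512) and
                 * simply re-marks the inode dirty. */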
1785                 if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
1786                         goto out_mark_dirty;
1787
1788                 /* don't wait for the COMMIT response */
1789                 flags = 0;
1790         }
1791
1792         ret = nfs_commit_inode(inode, flags);
1793         if (ret >= 0) {
1794                 if (wbc->sync_mode == WB_SYNC_NONE) {
1795                         if (ret < wbc->nr_to_write)
1796                                 wbc->nr_to_write -= ret;
1797                         else
1798                                 wbc->nr_to_write = 0;
1799                 }
1800                 return 0;
1801         }
1802 out_mark_dirty:
1803         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1804         return ret;
1805 }
1806 #else
1807 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1808 {
1809         return 0;
1810 }
1811 #endif
1812
1813 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1814 {
1815         return nfs_commit_unstable_pages(inode, wbc);
1816 }
1817 EXPORT_SYMBOL_GPL(nfs_write_inode);
1818
1819 /*
1820  * Flush the inode to disk.
1821  */
1822 int nfs_wb_all(struct inode *inode)
1823 {
1824         struct writeback_control wbc = {
1825                 .sync_mode = WB_SYNC_ALL,
1826                 .nr_to_write = LONG_MAX,
1827                 .range_start = 0,
1828                 .range_end = LLONG_MAX,
1829         };
1830         int ret;
1831
1832         trace_nfs_writeback_inode_enter(inode);
1833
1834         ret = sync_inode(inode, &wbc);
1835
1836         trace_nfs_writeback_inode_exit(inode, ret);
1837         return ret;
1838 }
1839 EXPORT_SYMBOL_GPL(nfs_wb_all);
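/*
 * Illustrative use (a sketch of the kind of caller this serves): an
 * fsync-style path that must not return until both the WRITEs and the
 * matching COMMITs have reached stable storage.
 *
 *	int err = nfs_wb_all(inode);
 *	if (err < 0)
 *		...;	/* writeback or commit failed */
 */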
1840
1841 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1842 {
1843         struct nfs_page *req;
1844         int ret = 0;
1845
1846         wait_on_page_writeback(page);
1847
1848         /* Blocking call to cancel all requests on the page and join
1849          * them into a single (head) request */
1850         req = nfs_lock_and_join_requests(page, false);
1851
1852         if (IS_ERR(req)) {
1853                 ret = PTR_ERR(req);
1854         } else if (req) {
1855                 /* all requests from this page have been cancelled by
1856                  * nfs_lock_and_join_requests, so just remove the head
1857                  * request from the inode / page_private pointer and
1858                  * release it */
1859                 nfs_inode_remove_request(req);
1860                 /*
1861                  * In case nfs_inode_remove_request has marked the
1862                  * page as being dirty
1863                  */
1864                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1865                 nfs_unlock_and_release_request(req);
1866         }
1867
1868         return ret;
1869 }
1870
1871 /*
1872  * Write back all requests on one page - we do this before reading it.
1873  */
1874 int nfs_wb_page(struct inode *inode, struct page *page)
1875 {
1876         loff_t range_start = page_file_offset(page);
1877         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1878         struct writeback_control wbc = {
1879                 .sync_mode = WB_SYNC_ALL,
1880                 .nr_to_write = 0,
1881                 .range_start = range_start,
1882                 .range_end = range_end,
1883         };
1884         int ret;
1885
1886         trace_nfs_writeback_page_enter(inode);
1887
1888         for (;;) {
1889                 wait_on_page_writeback(page);
1890                 if (clear_page_dirty_for_io(page)) {
1891                         ret = nfs_writepage_locked(page, &wbc);
1892                         if (ret < 0)
1893                                 goto out_error;
1894                         continue;
1895                 }
1896                 ret = 0;
1897                 if (!PagePrivate(page))
1898                         break;
1899                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1900                 if (ret < 0)
1901                         goto out_error;
1902         }
1903 out_error:
1904         trace_nfs_writeback_page_exit(inode, ret);
1905         return ret;
1906 }
1907
1908 #ifdef CONFIG_MIGRATION
1909 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1910                 struct page *page, enum migrate_mode mode)
1911 {
1912         /*
1913          * If PagePrivate is set, then the page is currently associated with
1914          * an in-progress read or write request. Don't try to migrate it.
1915          *
1916          * FIXME: we could do this in principle, but we'll need a way to ensure
1917          *        that we can safely release the inode reference while holding
1918          *        the page lock.
1919          */
1920         if (PagePrivate(page))
1921                 return -EBUSY;
1922
1923         if (!nfs_fscache_release_page(page, GFP_KERNEL))
1924                 return -EBUSY;
1925
1926         return migrate_page(mapping, newpage, page, mode);
1927 }
1928 #endif
1929
1930 int __init nfs_init_writepagecache(void)
1931 {
1932         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1933                                              sizeof(struct nfs_pgio_header),
1934                                              0, SLAB_HWCACHE_ALIGN,
1935                                              NULL);
1936         if (nfs_wdata_cachep == NULL)
1937                 return -ENOMEM;
1938
1939         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1940                                                      nfs_wdata_cachep);
1941         if (nfs_wdata_mempool == NULL)
1942                 goto out_destroy_write_cache;
1943
1944         nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1945                                              sizeof(struct nfs_commit_data),
1946                                              0, SLAB_HWCACHE_ALIGN,
1947                                              NULL);
1948         if (nfs_cdata_cachep == NULL)
1949                 goto out_destroy_write_mempool;
1950
1951         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1952                                                       nfs_cdata_cachep);
1953         if (nfs_commit_mempool == NULL)
1954                 goto out_destroy_commit_cache;
1955
1956         /*
1957          * NFS congestion size, scale with available memory.
1958          *
1959          *  64MB:    8192k
1960          * 128MB:   11585k
1961          * 256MB:   16384k
1962          * 512MB:   23170k
1963          *   1GB:   32768k
1964          *   2GB:   46340k
1965          *   4GB:   65536k
1966          *   8GB:   92681k
1967          *  16GB:  131072k
1968          *
1969          * This allows larger machines to have larger/more transfers.
1970          * Limit the default to 256M
1971          */
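        /*
         * Worked example of the formula below, assuming 4KiB pages
         * (PAGE_SHIFT == 12) on a 1GB machine:
         *
         *	totalram_pages    = 262144
         *	int_sqrt(262144)  = 512
         *	16 * 512          = 8192
         *	8192 << (12 - 10) = 32768k
         *
         * which matches the 1GB row in the table above.
         */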
1972         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1973         if (nfs_congestion_kb > 256*1024)
1974                 nfs_congestion_kb = 256*1024;
1975
1976         return 0;
1977
1978 out_destroy_commit_cache:
1979         kmem_cache_destroy(nfs_cdata_cachep);
1980 out_destroy_write_mempool:
1981         mempool_destroy(nfs_wdata_mempool);
1982 out_destroy_write_cache:
1983         kmem_cache_destroy(nfs_wdata_cachep);
1984         return -ENOMEM;
1985 }
1986
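/*
 * Teardown mirrors nfs_init_writepagecache(): each mempool is
 * destroyed before the kmem_cache that backs it.
 */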
1987 void nfs_destroy_writepagecache(void)
1988 {
1989         mempool_destroy(nfs_commit_mempool);
1990         kmem_cache_destroy(nfs_cdata_cachep);
1991         mempool_destroy(nfs_wdata_mempool);
1992         kmem_cache_destroy(nfs_wdata_cachep);
1993 }
1994
1995 static const struct nfs_rw_ops nfs_rw_write_ops = {
1996         .rw_mode                = FMODE_WRITE,
1997         .rw_alloc_header        = nfs_writehdr_alloc,
1998         .rw_free_header         = nfs_writehdr_free,
1999         .rw_release             = nfs_writeback_release_common,
2000         .rw_done                = nfs_writeback_done,
2001         .rw_result              = nfs_writeback_result,
2002         .rw_initiate            = nfs_initiate_write,
2003 };