/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	struct xfs_ioend	*ioend;
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
		if (buffer_unwritten(bh))
		else if (buffer_delay(bh))
	} while ((bh = bh->b_this_page) != head);
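/*
 * Return the block device that backs data IO for this inode: realtime inodes
 * do their data IO to the realtime device, everything else goes to the data
 * device.
 */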
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	return mp->m_ddev_targp->bt_bdev;

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		bh->b_end_io(bh, !ioend->io_error);

	mempool_free(ioend, xfs_ioend_pool);
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
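/*
 * Allocate and reserve the transaction that will be used at I/O completion to
 * update the on-disk inode size, and attach it to the ioend so that the
 * completion side only has to commit it.
 */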
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
		xfs_trans_cancel(tp);

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

/*
 * Update on-disk file size now that data has been written to disk.
 */
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend)
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (ioend->io_error) {
		xfs_trans_cancel(tp);
		return ioend->io_error;

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right away.
 */
	struct xfs_ioend	*ioend)
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
			xfs_destroy_ioend(ioend);
/*
 * IO write completion.
 */
	struct work_struct	*work)
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
		ASSERT(!xfs_ioend_is_append(ioend));

	ioend->io_error = error;
	xfs_destroy_ioend(ioend);
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later
 * (vs. incore size).
 */
	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O, i.e. it stops the completion routine from being
	 * called too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	struct xfs_bmbt_irec	*imap,
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;

	if (XFS_FORCED_SHUTDOWN(mp))

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);

	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	trace_xfs_map_blocks_found(ip, offset, count, type, imap);
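/*
 * Return true if the cached mapping covers the block that @offset falls in,
 * i.e. the previously looked-up extent can be reused for this buffer.
 */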
	struct xfs_bmbt_irec	*imap,
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;

/*
 * BIO completion handler for buffered IO.
 */
	xfs_ioend_t		*ioend = bio->bi_private;

	if (!ioend->io_error)
		ioend->io_error = bio->bi_error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;

	xfs_finish_ioend(ioend);
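/*
 * Take an extra ioend reference for the bio we are about to issue and hand the
 * bio to the block layer; the reference is dropped again in the bio completion
 * handler above.  WB_SYNC_ALL writeback issues the bio as WRITE_SYNC.
 */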
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
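/*
 * Allocate a bio with room for as many pages as the block layer allows and
 * point it at the buffer's block device and starting sector.
 */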
	struct buffer_head	*bh)
	struct bio		*bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;

xfs_start_buffer_writeback(
	struct buffer_head	*bh)
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);

xfs_start_page_writeback(
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
		set_page_writeback_keepwrite(page);

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
/*
 * Submit all of the bios for an ioend.  We are only passed a single ioend at a
 * time; the caller is responsible for chaining prior to submission.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the ioend chain rather
 * than submit it to IO.  This typically only happens on a filesystem shutdown.
 */
	struct writeback_control *wbc,
	struct buffer_head	*bh;
	sector_t		lastblock = 0;

	/* Reserve log space if we might write beyond the on-disk inode size. */
	    ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		status = xfs_setfilesize_trans_alloc(ioend);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
		ioend->io_error = status;
		xfs_finish_ioend(ioend);

	for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			bio = xfs_alloc_ioend_bio(bh);
		} else if (bh->b_blocknr != lastblock + 1) {
			xfs_submit_ioend_bio(wbc, ioend, bio);

		if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
			xfs_submit_ioend_bio(wbc, ioend, bio);

		lastblock = bh->b_blocknr;

	xfs_submit_ioend_bio(wbc, ioend, bio);
	xfs_finish_ioend(ioend);
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
	struct buffer_head	*bh,
	struct xfs_writepage_ctx *wpc,
	struct list_head	*iolist)
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1) {
		struct xfs_ioend	*new;

			list_add(&wpc->ioend->io_list, iolist);

		new = xfs_alloc_ioend(inode, wpc->io_type);
		new->io_offset = offset;
		new->io_buffer_head = bh;
		new->io_buffer_tail = bh;
		wpc->ioend->io_buffer_tail->b_private = bh;
		wpc->ioend->io_buffer_tail = bh;

	bh->b_private = NULL;
	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);

	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	set_buffer_mapped(bh);

	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in.  If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
	bool			check_all_buffers)
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
	if (!page_has_buffers(page))

	bh = head = page_buffers(page);
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
	} while ((bh = bh->b_this_page) != head);
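/*
 * Invalidate part or all of a page backed by buffer_heads: trace the call and
 * let block_invalidatepage() do the real work.
 */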
xfs_vm_invalidatepage(
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
	block_invalidatepage(page, offset, length);
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing a
 * transaction.  Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
xfs_aops_discard_page(
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");

		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide.  The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	__uint64_t		end_offset)
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);

		if (offset >= end_offset)
		if (!buffer_uptodate(bh))

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;

		if (buffer_unwritten(bh)) {
			if (wpc->io_type != XFS_IO_UNWRITTEN) {
				wpc->io_type = XFS_IO_UNWRITTEN;
				wpc->imap_valid = false;
		} else if (buffer_delay(bh)) {
			if (wpc->io_type != XFS_IO_DELALLOC) {
				wpc->io_type = XFS_IO_DELALLOC;
				wpc->imap_valid = false;
		} else if (buffer_uptodate(bh)) {
			if (wpc->io_type != XFS_IO_OVERWRITE) {
				wpc->io_type = XFS_IO_OVERWRITE;
				wpc->imap_valid = false;
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;

			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
		if (wpc->imap_valid) {
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, &submit_list);

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend.  If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here.  That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now.  The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)

		xfs_aops_discard_page(page);
		ClearPageUptodate(page);

		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem.  In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);

	mapping_set_error(page->mapping, error);
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
	struct writeback_control *wbc,
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	__uint64_t		end_offset;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |			file mapping		| <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
		/* Adjust the end_offset to the end of file */

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

	redirty_page_for_writepage(wbc, page);
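/*
 * Write a single dirty page: run the common writepage worker on it and then
 * submit whatever ioend is left cached on the local writepage context.
 */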
	struct writeback_control *wbc)
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,

	ret = xfs_do_writepage(page, wbc, &wpc);
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
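/*
 * Write back a range of dirty pages.  A single writepage context is shared by
 * all calls to xfs_do_writepage() so that physically contiguous pages can be
 * merged into larger ioends, and any final cached ioend is submitted here.
 */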
	struct address_space	*mapping,
	struct writeback_control *wbc)
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
	if (WARN_ON_ONCE(unwritten))

	return try_to_free_buffers(page);
/*
 * When we map a DIO buffer, we may need to attach an ioend that describes the
 * type of write IO we are doing.  This passes to the completion function the
 * operations it needs to perform.  If the mapping is for an overwrite wholly
 * within the EOF then we don't need an ioend and so we don't allocate one.
 * This avoids the unnecessary overhead of allocating and freeing ioends for
 * workloads that don't require transactions on IO completion.
 *
 * If we get multiple mappings in a single IO, we might be mapping different
 * types.  But because the direct IO can only have a single private pointer, we
 * need to ensure that:
 *
 * a) i) the ioend spans the entire region of unwritten mappings; or
 *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
 * b) if it contains unwritten extents, it is *permanently* marked as such
 *
 * We could do this by chaining ioends like buffered IO does, but we only
 * actually get one IO completion callback from the direct IO, and that spans
 * the entire IO regardless of how many mappings and IOs are needed to complete
 * the DIO.  There is only going to be one reference to the ioend and its life
 * cycle is constrained by the DIO completion code.  Hence we don't need
 * reference counting here.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable.  Hence if we see this overflow, we have to assume that the IO is
 * extending the file size.  We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 *
 * For DAX page faults, we are preparing to never see unwritten extents here,
 * nor should we ever extend the inode size.  Hence we will soon have nothing to
 * do here for this case, ensuring we don't have to provide an IO completion
 * callback to free an ioend that we don't actually need for a fault into the
 * page at offset (2^63 - 1FSB) bytes.
 */
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	struct xfs_ioend	*ioend;
	xfs_off_t		size = bh_result->b_size;

	if (ISUNWRITTEN(imap))
		type = XFS_IO_UNWRITTEN;
		type = XFS_IO_OVERWRITE;

	trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);

		ASSERT(type == XFS_IO_OVERWRITE);
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,

	if (bh_result->b_private) {
		ioend = bh_result->b_private;
		ASSERT(ioend->io_size > 0);
		ASSERT(offset >= ioend->io_offset);
		if (offset + size > ioend->io_offset + ioend->io_size)
			ioend->io_size = offset - ioend->io_offset + size;

		if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
			ioend->io_type = XFS_IO_UNWRITTEN;

		trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
					      ioend->io_size, ioend->io_type,
	} else if (type == XFS_IO_UNWRITTEN ||
		   offset + size > i_size_read(inode) ||
		   offset + size < 0) {
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_size = size;

		bh_result->b_private = ioend;
		set_buffer_defer_completion(bh_result);

		trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed.  We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros.  Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;

	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	struct xfs_bmbt_irec	imap;

	if (XFS_FORCED_SHUTDOWN(mp))

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
		lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	/* for DAX, we convert unwritten extents directly */
	    (imap.br_startblock == HOLESTARTBLOCK ||
	     imap.br_startblock == DELAYSTARTBLOCK) ||
	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * xfs_iomap_write_direct() expects the shared lock.  It
			 * is unlocked on return.
			 */
			if (lockmode == XFS_ILOCK_EXCL)
				xfs_ilock_demote(ip, lockmode);

			error = xfs_iomap_write_direct(ip, offset, size,
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we know it is newly
			 * allocated if the write fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			xfs_iunlock(ip, lockmode);

		trace_xfs_get_blocks_alloc(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_DELALLOC, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
1279 if (IS_DAX(inode) && create) {
1280 ASSERT(!ISUNWRITTEN(&imap));
1281 /* zeroing is not needed at a higher layer */
1285 /* trim mapping down to size requested */
1286 if (direct || size > (1 << inode->i_blkbits))
1287 xfs_map_trim_size(inode, iblock, bh_result,
1288 &imap, offset, size);
1291 * For unwritten extents do not report a disk address in the buffered
1292 * read case (treat as if we're reading into a hole).
1294 if (imap.br_startblock != HOLESTARTBLOCK &&
1295 imap.br_startblock != DELAYSTARTBLOCK &&
1296 (create || !ISUNWRITTEN(&imap))) {
1297 xfs_map_buffer(inode, bh_result, &imap, offset);
1298 if (ISUNWRITTEN(&imap))
1299 set_buffer_unwritten(bh_result);
1300 /* direct IO needs special help */
1301 if (create && direct)
1302 xfs_map_direct(inode, bh_result, &imap, offset,
1307 * If this is a realtime file, data may be on a different device.
1308 * to that pointed to from the buffer_head b_bdev currently.
1310 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1313 * If we previously allocated a block out beyond eof and we are now
1314 * coming back to use it then we will need to flag it as new even if it
1315 * has a disk address.
1317 * With sub-block writes into unwritten extents we also need to mark
1318 * the buffer as new so that the unwritten parts of the buffer gets
1322 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1323 (offset >= i_size_read(inode)) ||
1324 (new || ISUNWRITTEN(&imap))))
1325 set_buffer_new(bh_result);
1327 if (imap.br_startblock == DELAYSTARTBLOCK) {
1330 set_buffer_uptodate(bh_result);
1331 set_buffer_mapped(bh_result);
1332 set_buffer_delay(bh_result);
1339 xfs_iunlock(ip, lockmode);
	struct inode		*inode,
	struct buffer_head	*bh_result,
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);

xfs_get_blocks_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);

xfs_get_blocks_dax_fault(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
__xfs_end_io_direct_write(
	struct inode		*inode,
	struct xfs_ioend	*ioend,
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)

	/*
	 * dio completion end_io functions are only called on writes if more
	 * than 0 bytes were written.
	 */

	/*
	 * The ioend only maps whole blocks, while the IO may be sector aligned.
	 * Hence the ioend offset/size may not match the IO offset/size exactly.
	 * Because we don't map overwrites within EOF into the ioend, the offset
	 * may not match, but only if the endio spans EOF.  Either way, write
	 * the IO sizes into the ioend so that completion processing does the
	 * right thing.
	 */
	ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
	ioend->io_size = size;
	ioend->io_offset = offset;

	/*
	 * The ioend tells us whether we are doing unwritten extent conversion
	 * or an append transaction that updates the on-disk file size.  These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 *
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size.  We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF.  Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&XFS_I(inode)->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&XFS_I(inode)->i_flags_lock);

	/*
	 * If we are doing an append IO that needs to update the EOF on disk,
	 * do the transaction reserve now so we can use common end io
	 * processing.  Stashing the error (if there is one) in the ioend will
	 * result in the ioend processing passing on the error if it is
	 * possible as we can't return it from here.
	 */
	if (ioend->io_type == XFS_IO_OVERWRITE)
		ioend->io_error = xfs_setfilesize_trans_alloc(ioend);

	xfs_end_io(&ioend->io_work);
/*
 * Complete a direct I/O write request.
 *
 * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
 * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
 * wholly within the EOF and so there is nothing for us to do.  Note that in this
 * case the completion can be called in interrupt context, whereas if we have an
 * ioend we will always be called in task context (i.e. from a workqueue).
 */
xfs_end_io_direct_write(
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_ioend	*ioend = private;

	trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
				     ioend ? ioend->io_type : 0, NULL);

	ASSERT(offset + size <= i_size_read(inode));

	__xfs_end_io_direct_write(inode, ioend, offset, size);

static inline ssize_t
	struct inode		*inode,
	struct iov_iter		*iter,
	void			(*endio)(struct kiocb *iocb,
	struct block_device	*bdev;

		return dax_do_io(iocb, inode, iter, offset,
				 xfs_get_blocks_direct, endio, 0);

	bdev = xfs_find_bdev_for_inode(inode);
	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
			xfs_get_blocks_direct, endio, NULL, flags);

	struct iov_iter		*iter,
	struct inode		*inode = iocb->ki_filp->f_mapping->host;

	if (iov_iter_rw(iter) == WRITE)
		return xfs_vm_do_dio(inode, iocb, iter, offset,
				     xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
	return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
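/*
 * Clean up after a failed buffered write: walk the buffers over the range of
 * the write that failed and punch out any delalloc blocks that were allocated
 * for it, so that we don't leave stale delalloc mappings behind.
 */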
xfs_vm_write_failed(
	struct inode		*inode,
	loff_t			block_offset;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on a 64-bit platform.  However, for a 64-bit pos request on a
	 * 32-bit platform, the high 32 bits will be masked off if we
	 * evaluate the block_offset via (pos & PAGE_MASK) because the
	 * PAGE_MASK is 0xfffff000 as an unsigned long, hence the result
	 * is incorrect, which could cause the following ASSERT to fail
	 * in most cases.  In order to avoid this, we can evaluate the
	 * block_offset of the start of the page by using shifts rather
	 * than masks, avoiding the mismatch problem.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)

		/* if the buffer is after the write, we're done */
		if (block_start >= to)

		if (!buffer_delay(bh))

		if (!buffer_new(bh) && block_offset < i_size_read(inode))

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/*
		 * This buffer does not contain data anymore.  Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks out
 * on failure.  Hence we copy-n-waste it here and call xfs_vm_write_failed() at
 * the appropriate point.
 */
	struct address_space	*mapping,
	struct page		**pagep,
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);

		page_cache_release(page);
/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written.  Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed.  For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
	struct address_space	*mapping,
	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		/* only kill blocks in this write beyond EOF */
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
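/*
 * Map a file block to a disk block for the ->bmap address space operation.
 * Flush any dirty data first so that the block mapping we report is stable.
 */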
	struct address_space	*mapping,
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
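/*
 * Buffered read paths: hand single page and readahead requests straight to the
 * generic mpage code, using xfs_get_blocks() for the block mapping.
 */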
	struct file		*unused,
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);

	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty.  If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty.  Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die.  Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
xfs_vm_set_page_dirty(
1748 xfs_vm_set_page_dirty(
1751 struct address_space *mapping = page->mapping;
1752 struct inode *inode = mapping->host;
1756 struct mem_cgroup *memcg;
1758 if (unlikely(!mapping))
1759 return !TestSetPageDirty(page);
1761 end_offset = i_size_read(inode);
1762 offset = page_offset(page);
1764 spin_lock(&mapping->private_lock);
1765 if (page_has_buffers(page)) {
1766 struct buffer_head *head = page_buffers(page);
1767 struct buffer_head *bh = head;
1770 if (offset < end_offset)
1771 set_buffer_dirty(bh);
1772 bh = bh->b_this_page;
1773 offset += 1 << inode->i_blkbits;
1774 } while (bh != head);
1777 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
1778 * per-memcg dirty page counters.
1780 memcg = mem_cgroup_begin_page_stat(page);
1781 newly_dirty = !TestSetPageDirty(page);
1782 spin_unlock(&mapping->private_lock);
1785 /* sigh - __set_page_dirty() is static, so copy it here, too */
1786 unsigned long flags;
1788 spin_lock_irqsave(&mapping->tree_lock, flags);
1789 if (page->mapping) { /* Race with truncate? */
1790 WARN_ON_ONCE(!PageUptodate(page));
1791 account_page_dirtied(page, mapping, memcg);
1792 radix_tree_tag_set(&mapping->page_tree,
1793 page_index(page), PAGECACHE_TAG_DIRTY);
1795 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1797 mem_cgroup_end_page_stat(memcg);
1799 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};