 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
	struct buffer_head *bh, *head;
	*delalloc = *unwritten = 0;
	bh = head = page_buffers(page);
		if (buffer_unwritten(bh))
		else if (buffer_delay(bh))
	} while ((bh = bh->b_this_page) != head);
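/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the main data device otherwise.
 */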
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	return mp->m_ddev_targp->bt_bdev;
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory. Do not use the ioend after this.
	struct buffer_head *bh, *next;
	for (bh = ioend->io_buffer_head; bh; bh = next) {
		bh->b_end_io(bh, !ioend->io_error);
	if (ioend->io_isasync) {
		aio_complete(ioend->io_iocb, ioend->io_error ?
				ioend->io_error : ioend->io_result, 0);
	inode_dio_done(ioend->io_inode);
	mempool_free(ioend, xfs_ioend_pool);
 * Fast and loose check if this write could update the on-disk inode size.
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
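/*
 * Allocate and reserve a transaction up front for a possible on-disk file
 * size update at I/O completion, and stash it in ioend->io_append_trans.
 * Freeze protection travels with the transaction, so tell lockdep we
 * released it here and clear PF_FSTRANS before handing the transaction off
 * to the completion thread.
 */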
xfs_setfilesize_trans_alloc(
	struct xfs_ioend *ioend)
	struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans *tp;
	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		xfs_trans_cancel(tp, 0);
	ioend->io_append_trans = tp;
	 * We will pass freeze protection with a transaction. So tell lockdep
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 * Update on-disk file size now that data has been written to disk.
	struct xfs_ioend *ioend)
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	struct xfs_trans *tp = ioend->io_append_trans;
	 * The transaction was allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return xfs_trans_commit(tp, 0);
 * Schedule IO completion handling on the final put of an ioend.
 * If there is no work to do we might as well call it a day and free the
	struct xfs_ioend *ioend)
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
			xfs_destroy_ioend(ioend);
 * IO write completion.
	struct work_struct *work)
	xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	if (ioend->io_append_trans) {
		 * We've got freeze protection passed with the transaction.
		 * Tell lockdep about it.
			&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		 * For buffered I/O we never preallocate a transaction when
		 * doing the unwritten extent conversion, but for direct I/O
		 * we do not know if we are converting an unwritten extent
		 * or not at the point where we preallocate the transaction.
		if (ioend->io_append_trans) {
			ASSERT(ioend->io_isdirect);
			current_set_flags_nested(
				&ioend->io_append_trans->t_pflags, PF_FSTRANS);
			xfs_trans_cancel(ioend->io_append_trans, 0);
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
			ioend->io_error = -error;
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
			ioend->io_error = -error;
		ASSERT(!xfs_ioend_is_append(ioend));
	xfs_destroy_ioend(ioend);
 * Call IO completion handling in caller context on the final put of an ioend.
xfs_finish_ioend_sync(
	struct xfs_ioend *ioend)
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;
	INIT_WORK(&ioend->io_work, xfs_end_io);
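/*
 * xfs_map_blocks() looks up the extent covering @offset for writeback of the
 * given I/O @type, allocating real blocks behind delalloc reservations via
 * xfs_iomap_write_allocate() when needed.
 */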
	struct xfs_bmbt_irec *imap,
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	ssize_t count = 1 << inode->i_blkbits;
	xfs_fileoff_t offset_fsb, end_fsb;
	int bmapi_flags = XFS_BMAPI_ENTIRE;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return -XFS_ERROR(error);
	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	trace_xfs_map_blocks_found(ip, offset, count, type, imap);
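/*
 * xfs_imap_valid() returns true if @offset lies within the extent described
 * by @imap.
 */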
	struct xfs_bmbt_irec *imap,
	offset >>= inode->i_blkbits;
	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
 * BIO completion handler for buffered IO.
	xfs_ioend_t *ioend = bio->bi_private;
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	xfs_finish_ioend(ioend);
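/*
 * Take an extra ioend reference for the bio in flight and submit it, using
 * WRITE_SYNC for data integrity writeback.
 */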
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
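/*
 * xfs_alloc_ioend_bio() allocates a bio sized for the buffer's block device
 * and points it at the buffer's starting sector.
 */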
	struct buffer_head *bh)
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
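/*
 * Mark a mapped, locked buffer up to date and under async writeback.
 */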
xfs_start_buffer_writeback(
	struct buffer_head *bh)
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));
	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
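/*
 * Move a page into writeback state, optionally clearing its dirty bit first;
 * if none of the buffers on the page are going to be written, finish the
 * page here.
 */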
xfs_start_page_writeback(
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	/* If no buffers on the page are to be written, finish it here */
		end_page_writeback(page);
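/* Add a whole buffer_head to a bio; a thin wrapper around bio_add_page(). */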
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark the
 * other buffers async write.
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
	struct writeback_control *wbc,
	xfs_ioend_t *head = ioend;
	struct buffer_head *bh;
	sector_t lastblock = 0;
	/* Pass 1 - start writeback */
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);
	/* Pass 2 - submit I/O */
		next = ioend->io_list;
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
			lastblock = bh->b_blocknr;
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too. Only ever called for the initial page
 * in a writepage request, so only ever one page.
	struct buffer_head *bh, *next_bh;
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
		} while ((bh = next_bh) != NULL);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
	struct buffer_head *bh,
	xfs_ioend_t **result,
	xfs_ioend_t *ioend = *result;
	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t *previous = *result;
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
			previous->io_list = ioend;
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
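/*
 * xfs_map_buffer() converts the extent's start block to a disk address,
 * adds the block offset of @offset within the extent, and marks the
 * buffer_head mapped to that location.
 */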
	struct buffer_head *bh,
	struct xfs_bmbt_irec *imap,
	struct xfs_mount *m = XFS_I(inode)->i_mount;
	xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);
	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
	set_buffer_mapped(bh);
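/*
 * xfs_map_at_offset() maps the buffer at @offset and clears its delalloc and
 * unwritten state now that real blocks back it.
 */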
	struct buffer_head *bh,
	struct xfs_bmbt_irec *imap,
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
	if (PageWriteback(page))
	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		bh = head = page_buffers(page);
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
		} while ((bh = bh->b_this_page) != head);
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
	struct xfs_bmbt_irec *imap,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc)
	struct buffer_head *bh, *head;
	xfs_off_t end_offset;
	unsigned long p_offset;
	int count = 0, done = 0, uptodate = 1;
	xfs_off_t offset = page_offset(page);
	if (page->index != tindex)
	if (!trylock_page(page))
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
	bh = head = page_buffers(page);
		if (offset >= end_offset)
		if (!buffer_uptodate(bh))
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
				type = XFS_IO_OVERWRITE;
			if (!xfs_imap_valid(inode, imap, offset)) {
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
	} while (offset += len, (bh = bh->b_this_page) != head);
	if (uptodate && bh == head)
		SetPageUptodate(page);
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
	xfs_start_page_writeback(page, !page_dirty, count);
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
	struct xfs_bmbt_irec *imap,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc,
	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
		pagevec_release(&pvec);
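/* Trace the invalidation, then let the generic buffer code do the work. */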
xfs_vm_invalidatepage(
	unsigned long offset)
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
xfs_aops_discard_page(
	struct inode *inode = page->mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	struct buffer_head *bh, *head;
	loff_t offset = page_offset(page);
	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
		xfs_fileoff_t start_fsb;
		if (!buffer_delay(bh))
		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
		offset += 1 << inode->i_blkbits;
	} while ((bh = bh->b_this_page) != head);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_vm_invalidatepage(page, 0);
 * Write out a dirty page.
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
	struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh, *head;
	struct xfs_bmbt_irec imap;
	xfs_ioend_t *ioend = NULL, *iohead = NULL;
	__uint64_t end_offset;
	pgoff_t end_index, last_index;
	int err, imap_valid = 0, uptodate = 1;
	trace_xfs_writepage(inode, page, 0);
	ASSERT(page_has_buffers(page));
	 * Refuse to write the page out if we are called from reclaim context.
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim. We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 * This should never happen except in the case of a VM regression so
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	if (WARN_ON(current->flags & PF_FSTRANS))
	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
		 * Just skip the page if it is fully outside i_size, e.g. due
		 * to a truncate operation that is in progress.
		if (page->index >= end_index + 1 || offset_into_page == 0) {
		 * The page straddles i_size. It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size. For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
	len = 1 << inode->i_blkbits;
	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;
	if (wbc->sync_mode == WB_SYNC_NONE)
		if (offset >= end_offset)
		if (!buffer_uptodate(bh))
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state. The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			 * This buffer is not uptodate and will not be
			 * written to disk. Ensure that we will put any
			 * subsequent writeable buffers into a new
			imap_valid = xfs_imap_valid(inode, &imap, offset);
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			err = xfs_map_blocks(inode, offset, &imap, type,
			imap_valid = xfs_imap_valid(inode, &imap, offset);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
	} while (offset += len, ((bh = bh->b_this_page) != head));
	if (uptodate && bh == head)
		SetPageUptodate(page);
	xfs_start_page_writeback(page, 1, count);
	/* if there is no IO to be submitted for this page, we are done */
	 * Any errors from this point onwards need to be reported through the IO
	 * completion path as we have marked the initial page as under writeback
		xfs_off_t end_index;
		end_index = imap.br_startoff + imap.br_blockcount;
		end_index <<= inode->i_blkbits;
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;
		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
	 * Reserve log space if we might write beyond the on-disk inode size.
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);
	xfs_submit_ioend(wbc, iohead, err);
		xfs_cancel_ioend(iohead);
	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	redirty_page_for_writepage(wbc, page);
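/*
 * xfs_vm_writepages() clears the ITRUNCATED flag and hands the rest to
 * generic_writepages(), which ends up calling xfs_vm_writepage() above.
 */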
	struct address_space *mapping,
	struct writeback_control *wbc)
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 * Returns 1 if the page is ok to release, 0 otherwise.
	int delalloc, unwritten;
	trace_xfs_releasepage(page->mapping->host, page, 0);
	xfs_count_page_state(page, &delalloc, &unwritten);
	if (WARN_ON(delalloc))
	if (WARN_ON(unwritten))
	return try_to_free_buffers(page);
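/*
 * __xfs_get_blocks() is the common mapping routine for buffered and direct
 * I/O: look up the extent covering @iblock and, if @create is set, allocate
 * blocks - delalloc reservations for buffered writes, real allocations for
 * direct I/O and files with an extent size hint.
 */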
	struct inode *inode,
	struct buffer_head *bh_result,
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb, end_fsb;
	struct xfs_bmbt_irec imap;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);
	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	if (!create && direct && offset >= i_size_read(inode))
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first. For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
		lockmode = xfs_ilock_map_shared(ip);
	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction. It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here. If we
			 * are allocating a new delalloc block, set the new
			 * flag so the buffer is marked new and we know it is
			 * newly allocated if the write fails.
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			xfs_iunlock(ip, lockmode);
		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
		trace_xfs_get_blocks_notfound(ip, offset, size);
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);
	if (imap.br_startblock == DELAYSTARTBLOCK) {
		set_buffer_uptodate(bh_result);
		set_buffer_mapped(bh_result);
		set_buffer_delay(bh_result);
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t mapping_size;
		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;
		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;
		bh_result->b_size = mapping_size;
	xfs_iunlock(ip, lockmode);
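/* Buffered and direct I/O wrappers around __xfs_get_blocks(). */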
	struct inode *inode,
	struct buffer_head *bh_result,
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
xfs_get_blocks_direct(
	struct inode *inode,
	struct buffer_head *bh_result,
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
 * Complete a direct I/O write request.
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents. In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done. But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions. In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
xfs_end_io_direct_write(
	struct xfs_ioend *ioend = iocb->private;
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size. To prevent this just update it a little bit earlier here.
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called. Thus we need to protect
	 * against double-freeing.
	iocb->private = NULL;
	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
		xfs_finish_ioend_sync(ioend);
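/*
 * xfs_vm_direct_IO() issues a direct I/O request. For writes it preallocates
 * a size-update transaction in the ioend when the write may extend the
 * on-disk inode size, and cancels it again if submission fails.
 */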
	const struct iovec *iov,
	unsigned long nr_segs)
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct block_device *bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend *ioend = NULL;
		size_t size = iov_length(iov, nr_segs);
		 * We need to preallocate a transaction for a size update
		 * here. In the case that this write both updates the size
		 * and converts at least one unwritten extent we will cancel
		 * the still clean transaction after the I/O has finished.
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size) {
			ret = xfs_setfilesize_trans_alloc(ioend);
				goto out_destroy_ioend;
			ioend->io_isdirect = 1;
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_trans_cancel;
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    xfs_get_blocks_direct,
	if (ioend->io_append_trans) {
		current_set_flags_nested(&ioend->io_append_trans->t_pflags,
			&inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		xfs_trans_cancel(ioend->io_append_trans, 0);
	xfs_destroy_ioend(ioend);
 * Punch out the delalloc blocks we have already allocated.
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
xfs_vm_kill_delalloc_range(
	struct inode *inode,
	struct xfs_inode *ip = XFS_I(inode);
	xfs_fileoff_t start_fsb;
	xfs_fileoff_t end_fsb;
	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
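/*
 * Walk the buffers covering the failed range of the write and punch out any
 * delalloc blocks that were created for it.
 */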
xfs_vm_write_failed(
	struct inode *inode,
	loff_t block_offset = pos & PAGE_MASK;
	loff_t from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t to = from + len;
	struct buffer_head *bh, *head;
	ASSERT(block_offset + from == pos);
	head = page_buffers(page);
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;
		/* skip buffers before the write */
		if (block_end <= from)
		/* if the buffer is after the write, we're done */
		if (block_start >= to)
		if (!buffer_delay(bh))
		if (!buffer_new(bh) && block_offset < i_size_read(inode))
		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks out
 * on failure. Hence we open-code it here and call xfs_vm_write_failed() at
 * the appropriate point.
	struct address_space *mapping,
	struct page **pagep,
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	ASSERT(len <= PAGE_CACHE_SIZE);
	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode *inode = mapping->host;
		xfs_vm_write_failed(inode, page, pos, len);
		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));
		page_cache_release(page);
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written. For blocks within EOF, generic_write_end() zeros them
 * so they are safe to leave alone and be written with all the other valid data.
	struct address_space *mapping,
	ASSERT(len <= PAGE_CACHE_SIZE);
	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode *inode = mapping->host;
		size_t isize = i_size_read(inode);
		loff_t to = pos + len;
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
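/*
 * xfs_vm_bmap() flushes dirty data under the shared iolock before mapping
 * the block through the generic bmap helper.
 */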
	struct address_space *mapping,
	struct inode *inode = (struct inode *)mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
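/* Buffered reads go straight to the generic mpage code. */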
	struct file *unused,
	return mpage_readpage(page, xfs_get_blocks);
	struct file *unused,
	struct address_space *mapping,
	struct list_head *pages,
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};